2024-11-20 22:23:30,858 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 22:23:30,906 main DEBUG Took 0.044423 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 22:23:30,908 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 22:23:30,908 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 22:23:30,915 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 22:23:30,917 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:30,950 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 22:23:31,000 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,025 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,040 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,042 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,047 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,048 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,053 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,054 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,062 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,063 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,065 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,066 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,068 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,073 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 22:23:31,074 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,082 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,083 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,084 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,085 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,086 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,090 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,091 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,097 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,098 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 22:23:31,099 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,099 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 22:23:31,109 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 22:23:31,123 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 22:23:31,126 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 22:23:31,127 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 22:23:31,140 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 22:23:31,141 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 22:23:31,165 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 22:23:31,171 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 22:23:31,174 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 22:23:31,175 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 22:23:31,176 main DEBUG createAppenders(={Console}) 2024-11-20 22:23:31,179 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 22:23:31,180 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 22:23:31,180 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 22:23:31,182 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 22:23:31,183 main DEBUG OutputStream closed 2024-11-20 22:23:31,184 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 22:23:31,184 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 22:23:31,185 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 22:23:31,410 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 22:23:31,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 22:23:31,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 22:23:31,433 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 22:23:31,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 22:23:31,435 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 22:23:31,435 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 22:23:31,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 22:23:31,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 22:23:31,437 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 22:23:31,437 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 22:23:31,438 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 22:23:31,440 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 22:23:31,440 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 22:23:31,441 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 22:23:31,441 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 22:23:31,442 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 22:23:31,444 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 22:23:31,447 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 22:23:31,448 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 22:23:31,448 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 22:23:31,450 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T22:23:32,138 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c 2024-11-20 22:23:32,144 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 22:23:32,145 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
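[Editor's aside, not part of the captured output] The entries above record Log4j 2.17 loading the test's log4j2.properties (a PatternLayout plus the HBaseTestAppender writing to SYSTEM_ERR) and registering one MBean per configured logger. A minimal, hedged Java sketch for inspecting which configuration, appenders, and logger levels a running Log4j2 core context ended up with, assuming log4j-core is the active provider on the classpath:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;

public class Log4j2ConfigDump {
    public static void main(String[] args) {
        // false => return the context for the current classloader without forcing a new one
        LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        Configuration cfg = ctx.getConfiguration();

        // e.g. a PropertiesConfiguration loaded from log4j2.properties, as in the log above
        System.out.println("Configuration: " + cfg);

        // Appenders such as "Console" (HBaseTestAppender -> SYSTEM_ERR in this run)
        cfg.getAppenders().forEach((name, appender) ->
            System.out.println("Appender " + name + " -> " + appender.getClass().getName()));

        // Logger configs such as org.apache.hadoop.hbase=DEBUG, org.apache.zookeeper=ERROR
        cfg.getLoggers().forEach((name, loggerConfig) ->
            System.out.println("Logger '" + name + "' level=" + loggerConfig.getLevel()));
    }
}
```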
2024-11-20T22:23:32,176 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-20T22:23:32,241 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T22:23:32,249 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252, deleteOnExit=true 2024-11-20T22:23:32,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T22:23:32,255 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/test.cache.data in system properties and HBase conf 2024-11-20T22:23:32,257 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T22:23:32,259 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/hadoop.log.dir in system properties and HBase conf 2024-11-20T22:23:32,262 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T22:23:32,264 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T22:23:32,265 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T22:23:32,528 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T22:23:32,758 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T22:23:32,777 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T22:23:32,778 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T22:23:32,780 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T22:23:32,781 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T22:23:32,784 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T22:23:32,785 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T22:23:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T22:23:32,796 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T22:23:32,797 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T22:23:32,797 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/nfs.dump.dir in system properties and HBase conf 2024-11-20T22:23:32,797 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/java.io.tmpdir in system properties and HBase conf 2024-11-20T22:23:32,798 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T22:23:32,798 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T22:23:32,799 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T22:23:34,827 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T22:23:35,026 INFO [Time-limited test {}] log.Log(170): Logging initialized @6432ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T22:23:35,188 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T22:23:35,315 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T22:23:35,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T22:23:35,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T22:23:35,467 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T22:23:35,521 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T22:23:35,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ff7a6fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/hadoop.log.dir/,AVAILABLE} 2024-11-20T22:23:35,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e8672f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T22:23:36,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5786ce49{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/java.io.tmpdir/jetty-localhost-33443-hadoop-hdfs-3_4_1-tests_jar-_-any-10971700492721266273/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T22:23:36,119 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b71dca4{HTTP/1.1, (http/1.1)}{localhost:33443} 2024-11-20T22:23:36,120 INFO [Time-limited test {}] server.Server(415): Started @7526ms 2024-11-20T22:23:37,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T22:23:37,022 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T22:23:37,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T22:23:37,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T22:23:37,026 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T22:23:37,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1535b9ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/hadoop.log.dir/,AVAILABLE} 2024-11-20T22:23:37,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@608de7d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T22:23:37,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d30b7f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/java.io.tmpdir/jetty-localhost-33043-hadoop-hdfs-3_4_1-tests_jar-_-any-6559940009164624500/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T22:23:37,178 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61692e60{HTTP/1.1, (http/1.1)}{localhost:33043} 2024-11-20T22:23:37,179 INFO [Time-limited test {}] server.Server(415): Started @8585ms 2024-11-20T22:23:37,351 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T22:23:38,793 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/dfs/data/data1/current/BP-1651373866-172.17.0.2-1732141414202/current, will proceed with Du for space computation calculation, 2024-11-20T22:23:38,803 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/dfs/data/data2/current/BP-1651373866-172.17.0.2-1732141414202/current, will proceed with Du for space computation calculation, 2024-11-20T22:23:38,946 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T22:23:39,033 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea2707017cf5c79e with lease ID 0xb02658fda9e51009: Processing first storage report for DS-7ec4f6a4-2f27-42ed-826d-4eb32d84b5e8 from datanode DatanodeRegistration(127.0.0.1:39471, datanodeUuid=38ad54c8-a252-4485-8e9a-2cc07cf514a6, infoPort=34063, infoSecurePort=0, ipcPort=42351, storageInfo=lv=-57;cid=testClusterID;nsid=801802784;c=1732141414202) 2024-11-20T22:23:39,035 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea2707017cf5c79e with lease ID 0xb02658fda9e51009: from storage DS-7ec4f6a4-2f27-42ed-826d-4eb32d84b5e8 node DatanodeRegistration(127.0.0.1:39471, datanodeUuid=38ad54c8-a252-4485-8e9a-2cc07cf514a6, infoPort=34063, infoSecurePort=0, ipcPort=42351, storageInfo=lv=-57;cid=testClusterID;nsid=801802784;c=1732141414202), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-20T22:23:39,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea2707017cf5c79e with lease ID 0xb02658fda9e51009: Processing first storage report for DS-3ec57f8a-de68-4e46-9f55-bc454cb44238 from datanode DatanodeRegistration(127.0.0.1:39471, datanodeUuid=38ad54c8-a252-4485-8e9a-2cc07cf514a6, infoPort=34063, infoSecurePort=0, ipcPort=42351, storageInfo=lv=-57;cid=testClusterID;nsid=801802784;c=1732141414202) 2024-11-20T22:23:39,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea2707017cf5c79e with lease ID 0xb02658fda9e51009: from storage DS-3ec57f8a-de68-4e46-9f55-bc454cb44238 node DatanodeRegistration(127.0.0.1:39471, datanodeUuid=38ad54c8-a252-4485-8e9a-2cc07cf514a6, infoPort=34063, infoSecurePort=0, ipcPort=42351, storageInfo=lv=-57;cid=testClusterID;nsid=801802784;c=1732141414202), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T22:23:39,179 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c 2024-11-20T22:23:39,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/zookeeper_0, clientPort=51916, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T22:23:39,434 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51916 2024-11-20T22:23:39,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:39,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:40,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741825_1001 (size=7) 2024-11-20T22:23:40,117 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 with version=8 2024-11-20T22:23:40,117 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/hbase-staging 2024-11-20T22:23:40,450 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T22:23:40,884 INFO [Time-limited test {}] client.ConnectionUtils(129): master/6365a1e51efd:0 server-side Connection retries=45 2024-11-20T22:23:40,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:40,917 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:40,917 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T22:23:40,917 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:40,917 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-20T22:23:41,217 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T22:23:41,314 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T22:23:41,338 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T22:23:41,345 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T22:23:41,384 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 52252 (auto-detected) 2024-11-20T22:23:41,386 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T22:23:41,427 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35073 2024-11-20T22:23:41,438 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:41,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:41,490 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35073 connecting to ZooKeeper ensemble=127.0.0.1:51916 2024-11-20T22:23:41,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350730x0, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T22:23:41,663 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35073-0x1015ba22db50000 connected 2024-11-20T22:23:41,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T22:23:41,839 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T22:23:41,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T22:23:41,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35073 2024-11-20T22:23:41,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35073 2024-11-20T22:23:41,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35073 2024-11-20T22:23:41,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35073 2024-11-20T22:23:41,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35073 2024-11-20T22:23:41,923 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72, hbase.cluster.distributed=false 2024-11-20T22:23:42,055 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6365a1e51efd:0 server-side Connection retries=45 2024-11-20T22:23:42,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:42,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:42,057 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T22:23:42,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T22:23:42,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T22:23:42,061 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T22:23:42,068 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T22:23:42,075 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46811 2024-11-20T22:23:42,078 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T22:23:42,094 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T22:23:42,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:42,102 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:42,109 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46811 connecting to ZooKeeper ensemble=127.0.0.1:51916 2024-11-20T22:23:42,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468110x0, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T22:23:42,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468110x0, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T22:23:42,132 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468110x0, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-11-20T22:23:42,134 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46811-0x1015ba22db50001 connected 2024-11-20T22:23:42,134 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T22:23:42,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46811 2024-11-20T22:23:42,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46811 2024-11-20T22:23:42,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46811 2024-11-20T22:23:42,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46811 2024-11-20T22:23:42,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46811 2024-11-20T22:23:42,206 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/6365a1e51efd,35073,1732141420438 2024-11-20T22:23:42,227 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6365a1e51efd:35073 2024-11-20T22:23:42,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:42,254 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6365a1e51efd,35073,1732141420438 2024-11-20T22:23:42,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:42,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T22:23:42,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:42,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T22:23:42,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:42,299 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on existing znode=/hbase/master 
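[Editor's aside, not part of the captured output] The ZKWatcher/ZKUtil entries above show the master and regionserver registering watches on znodes that may not exist yet (/hbase/master, /hbase/running, /hbase/acl). With the plain ZooKeeper client this is done via an exists() call that also attaches a watch; the sketch below is a stand-alone illustration of that pattern (connection string and sleep are placeholders), not HBase's own ZKUtil code:

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMissingZNode {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);

        // Placeholder quorum; the test above used 127.0.0.1:51916
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
            // Watches set via exists() fire here on NodeCreated/NodeDeleted/NodeDataChanged
            System.out.println("Event: " + event.getType() + " on " + event.getPath());
        });
        connected.await();

        // exists() succeeds even if the znode is absent; the watch is still registered,
        // which is what "Set watcher on znode that does not yet exist" refers to above.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println("/hbase/master " + (stat == null ? "does not exist yet" : "exists"));

        Thread.sleep(60_000); // keep the session alive long enough to observe events
        zk.close();
    }
}
```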
2024-11-20T22:23:42,300 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T22:23:42,301 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6365a1e51efd,35073,1732141420438 from backup master directory 2024-11-20T22:23:42,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6365a1e51efd,35073,1732141420438 2024-11-20T22:23:42,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:42,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T22:23:42,313 WARN [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T22:23:42,313 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6365a1e51efd,35073,1732141420438 2024-11-20T22:23:42,316 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T22:23:42,317 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T22:23:42,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741826_1002 (size=42) 2024-11-20T22:23:42,407 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/hbase.id with ID: dc403baa-6892-4869-aee0-dcf4cf15572c 2024-11-20T22:23:42,506 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T22:23:42,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:42,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:42,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741827_1003 (size=196) 2024-11-20T22:23:43,017 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => 
{METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:23:43,020 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T22:23:43,069 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:43,077 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T22:23:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741828_1004 (size=1189) 2024-11-20T22:23:43,167 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store 2024-11-20T22:23:43,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741829_1005 (size=34) 2024-11-20T22:23:43,225 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
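[Editor's aside, not part of the captured output] The NoSuchMethodException in the stack trace above is expected: FanOutOneBlockAsyncDFSOutputSaslHelper reflectively probes DFSClient for decryptEncryptedDataEncryptionKey to tell pre- and post-HDFS-12396 Hadoop versions apart, then falls back when the method is absent. A generic sketch of that probe-and-fallback pattern (class and method names here are illustrative, not the HBase implementation):

```java
import java.lang.reflect.Method;

public final class FeatureProbe {
    private FeatureProbe() {}

    /**
     * Returns the named method if the target class declares it, or null if this
     * runtime predates the feature -- mirroring the "should be hadoop version
     * with HDFS-12396" DEBUG message in the log above.
     */
    static Method probe(Class<?> target, String methodName, Class<?>... parameterTypes) {
        try {
            return target.getDeclaredMethod(methodName, parameterTypes);
        } catch (NoSuchMethodException e) {
            return null; // older runtime: the caller switches to the fallback code path
        }
    }

    public static void main(String[] args) {
        // Example probe against a JDK class: the varargs List.of(...) only exists on Java 9+.
        Method listOf = probe(java.util.List.class, "of", Object[].class);
        System.out.println(listOf != null
            ? "Feature present, using the new code path"
            : "Feature missing, using the fallback code path");
    }
}
```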
2024-11-20T22:23:43,226 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:43,227 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T22:23:43,227 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:43,227 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:43,228 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T22:23:43,228 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:43,228 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:23:43,228 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T22:23:43,236 WARN [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/.initializing 2024-11-20T22:23:43,237 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/WALs/6365a1e51efd,35073,1732141420438 2024-11-20T22:23:43,250 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T22:23:43,271 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6365a1e51efd%2C35073%2C1732141420438, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/WALs/6365a1e51efd,35073,1732141420438, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/oldWALs, maxLogs=10 2024-11-20T22:23:43,303 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/WALs/6365a1e51efd,35073,1732141420438/6365a1e51efd%2C35073%2C1732141420438.1732141423278, exclude list is [], retry=0 2024-11-20T22:23:43,330 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39471,DS-7ec4f6a4-2f27-42ed-826d-4eb32d84b5e8,DISK] 2024-11-20T22:23:43,334 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T22:23:43,385 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/WALs/6365a1e51efd,35073,1732141420438/6365a1e51efd%2C35073%2C1732141420438.1732141423278 2024-11-20T22:23:43,386 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063)] 2024-11-20T22:23:43,387 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:43,388 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:43,392 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,393 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T22:23:43,498 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:43,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:43,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T22:23:43,508 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:43,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:43,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T22:23:43,517 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:43,519 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:43,519 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T22:23:43,523 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:43,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:43,529 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,531 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,542 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T22:23:43,548 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T22:23:43,556 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:43,558 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73187672, jitterRate=0.09058129787445068}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T22:23:43,568 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T22:23:43,569 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T22:23:43,618 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2279cfb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:43,665 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
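The FlushLargeStoresPolicy entry above falls back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of how that property could be set on an ordinary user table through the public Admin API follows; only the property key comes from the log, while the table name and the 64 MB value are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SetFlushLowerBound {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("my_table"); // hypothetical table name
          TableDescriptor current = admin.getDescriptor(table);
          // Set the per-column-family flush lower bound named in the log (64 MB here).
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                        String.valueOf(64L * 1024 * 1024))
              .build();
          admin.modifyTable(updated);
        }
      }
    }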
2024-11-20T22:23:43,682 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T22:23:43,683 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T22:23:43,686 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T22:23:43,688 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T22:23:43,697 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 9 msec 2024-11-20T22:23:43,697 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T22:23:43,741 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T22:23:43,763 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T22:23:43,804 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T22:23:43,807 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T22:23:43,809 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T22:23:43,820 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T22:23:43,823 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T22:23:43,828 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T22:23:43,837 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T22:23:43,840 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T22:23:43,851 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T22:23:43,873 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T22:23:43,893 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T22:23:43,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T22:23:43,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T22:23:43,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:43,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:43,914 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=6365a1e51efd,35073,1732141420438, sessionid=0x1015ba22db50000, setting cluster-up flag (Was=false) 2024-11-20T22:23:43,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:43,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:43,970 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T22:23:43,973 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6365a1e51efd,35073,1732141420438 2024-11-20T22:23:44,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:44,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:44,034 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T22:23:44,036 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6365a1e51efd,35073,1732141420438 2024-11-20T22:23:44,147 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6365a1e51efd:46811 2024-11-20T22:23:44,148 DEBUG 
[master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-20T22:23:44,153 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1008): ClusterId : dc403baa-6892-4869-aee0-dcf4cf15572c 2024-11-20T22:23:44,157 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T22:23:44,157 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T22:23:44,159 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T22:23:44,166 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6365a1e51efd,35073,1732141420438 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T22:23:44,172 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T22:23:44,172 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T22:23:44,173 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:44,173 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:44,173 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:44,173 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6365a1e51efd:0, corePoolSize=5, maxPoolSize=5 2024-11-20T22:23:44,173 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6365a1e51efd:0, corePoolSize=10, maxPoolSize=10 2024-11-20T22:23:44,173 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,174 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6365a1e51efd:0, corePoolSize=2, maxPoolSize=2 2024-11-20T22:23:44,174 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,180 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T22:23:44,180 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T22:23:44,180 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T22:23:44,181 DEBUG [RS:0;6365a1e51efd:46811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76b567e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:44,186 DEBUG [RS:0;6365a1e51efd:46811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60424e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6365a1e51efd/172.17.0.2:0 2024-11-20T22:23:44,188 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732141454188 2024-11-20T22:23:44,190 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T22:23:44,186 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:44,190 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T22:23:44,190 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T22:23:44,190 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-20T22:23:44,190 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T22:23:44,191 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T22:23:44,193 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(3073): reportForDuty to master=6365a1e51efd,35073,1732141420438 with isa=6365a1e51efd/172.17.0.2:46811, startcode=1732141422048 2024-11-20T22:23:44,196 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T22:23:44,196 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T22:23:44,196 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T22:23:44,197 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T22:23:44,197 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
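The FSTableDescriptors entry above prints the full hbase:meta schema (families info, rep_barrier and table, with ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching and small block sizes). As a rough illustration only, a comparable column family can be declared for a user table with the public builder API; the sketch below assumes a hypothetical table name and mirrors just the 'info' attributes shown in the log, it is not how the master builds meta internally.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateInfoLikeTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Column family mirroring the 'info' attributes printed in the log.
          ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setKeepDeletedCells(KeepDeletedCells.FALSE)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBloomFilterType(BloomType.ROWCOL)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example_table")) // hypothetical table name
              .setColumnFamily(info)
              .build();
          admin.createTable(td);
        }
      }
    }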
2024-11-20T22:23:44,199 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T22:23:44,200 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T22:23:44,201 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T22:23:44,209 DEBUG [RS:0;6365a1e51efd:46811 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T22:23:44,215 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T22:23:44,215 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T22:23:44,219 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.large.0-1732141424217,5,FailOnTimeoutGroup] 2024-11-20T22:23:44,222 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.small.0-1732141424219,5,FailOnTimeoutGroup] 2024-11-20T22:23:44,222 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,222 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T22:23:44,224 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,225 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:44,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741831_1007 (size=1039) 2024-11-20T22:23:44,229 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T22:23:44,229 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:23:44,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741832_1008 (size=32) 2024-11-20T22:23:44,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:44,248 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37945, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T22:23:44,255 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35073 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,257 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35073 {}] master.ServerManager(486): Registering regionserver=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T22:23:44,263 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T22:23:44,263 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:44,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:44,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T22:23:44,274 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T22:23:44,274 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:44,276 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:23:44,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:44,276 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46027 2024-11-20T22:23:44,276 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T22:23:44,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T22:23:44,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/rs 2024-11-20T22:23:44,286 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T22:23:44,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:44,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:44,290 DEBUG [RS:0;6365a1e51efd:46811 {}] zookeeper.ZKUtil(111): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,290 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6365a1e51efd,46811,1732141422048] 2024-11-20T22:23:44,290 WARN [RS:0;6365a1e51efd:46811 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T22:23:44,290 INFO [RS:0;6365a1e51efd:46811 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T22:23:44,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740 2024-11-20T22:23:44,291 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740 2024-11-20T22:23:44,299 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
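The entries above show the region server registering an ephemeral child znode under /hbase/rs, which the master's RegionServerTracker watches for membership changes. A minimal sketch of observing the same znode with the plain ZooKeeper client follows; the quorum address is the test value from the log and the session timeout is an assumption.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZnodes {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address taken from the log above; adjust for a real cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51916", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Watch /hbase/rs the way the master's RegionServerTracker does:
        // each live region server keeps an ephemeral child znode here.
        List<String> servers = zk.getChildren("/hbase/rs",
            event -> System.out.println("children changed: " + event.getPath()));
        servers.forEach(System.out::println); // e.g. 6365a1e51efd,46811,1732141422048
        zk.close();
      }
    }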
2024-11-20T22:23:44,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T22:23:44,323 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:44,327 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75408530, jitterRate=0.1236746609210968}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:44,337 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T22:23:44,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T22:23:44,338 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T22:23:44,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T22:23:44,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T22:23:44,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T22:23:44,339 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T22:23:44,340 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T22:23:44,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T22:23:44,346 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T22:23:44,346 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T22:23:44,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T22:23:44,363 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T22:23:44,372 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T22:23:44,376 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T22:23:44,398 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T22:23:44,401 INFO [RS:0;6365a1e51efd:46811 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T22:23:44,402 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,402 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T22:23:44,411 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,411 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,412 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,412 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,412 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,412 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,413 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6365a1e51efd:0, corePoolSize=2, maxPoolSize=2 2024-11-20T22:23:44,413 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,413 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,413 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,413 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,413 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6365a1e51efd:0, corePoolSize=1, maxPoolSize=1 2024-11-20T22:23:44,414 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6365a1e51efd:0, corePoolSize=3, maxPoolSize=3 2024-11-20T22:23:44,414 DEBUG [RS:0;6365a1e51efd:46811 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0, corePoolSize=3, maxPoolSize=3 2024-11-20T22:23:44,422 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,423 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,423 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:44,423 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,423 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,46811,1732141422048-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T22:23:44,442 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T22:23:44,443 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,46811,1732141422048-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:44,470 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.Replication(204): 6365a1e51efd,46811,1732141422048 started 2024-11-20T22:23:44,470 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1767): Serving as 6365a1e51efd,46811,1732141422048, RpcServer on 6365a1e51efd/172.17.0.2:46811, sessionid=0x1015ba22db50001 2024-11-20T22:23:44,471 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T22:23:44,471 DEBUG [RS:0;6365a1e51efd:46811 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,471 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6365a1e51efd,46811,1732141422048' 2024-11-20T22:23:44,471 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T22:23:44,473 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T22:23:44,474 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T22:23:44,474 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T22:23:44,474 DEBUG [RS:0;6365a1e51efd:46811 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,475 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6365a1e51efd,46811,1732141422048' 2024-11-20T22:23:44,475 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T22:23:44,476 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T22:23:44,477 DEBUG [RS:0;6365a1e51efd:46811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T22:23:44,477 INFO [RS:0;6365a1e51efd:46811 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T22:23:44,477 INFO [RS:0;6365a1e51efd:46811 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T22:23:44,527 WARN [6365a1e51efd:35073 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-11-20T22:23:44,584 INFO [RS:0;6365a1e51efd:46811 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T22:23:44,592 INFO [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6365a1e51efd%2C46811%2C1732141422048, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/oldWALs, maxLogs=32 2024-11-20T22:23:44,615 DEBUG [RS:0;6365a1e51efd:46811 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048/6365a1e51efd%2C46811%2C1732141422048.1732141424595, exclude list is [], retry=0 2024-11-20T22:23:44,621 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39471,DS-7ec4f6a4-2f27-42ed-826d-4eb32d84b5e8,DISK] 2024-11-20T22:23:44,629 INFO [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048/6365a1e51efd%2C46811%2C1732141422048.1732141424595 2024-11-20T22:23:44,629 DEBUG [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063)] 2024-11-20T22:23:44,779 DEBUG [6365a1e51efd:35073 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T22:23:44,786 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:44,794 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6365a1e51efd,46811,1732141422048, state=OPENING 2024-11-20T22:23:44,836 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T22:23:44,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:44,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:44,847 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:44,847 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:44,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:23:45,062 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:45,068 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-11-20T22:23:45,079 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T22:23:45,106 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T22:23:45,106 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T22:23:45,109 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T22:23:45,115 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6365a1e51efd%2C46811%2C1732141422048.meta, suffix=.meta, logDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/oldWALs, maxLogs=32 2024-11-20T22:23:45,138 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048/6365a1e51efd%2C46811%2C1732141422048.meta.1732141425118.meta, exclude list is [], retry=0 2024-11-20T22:23:45,146 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39471,DS-7ec4f6a4-2f27-42ed-826d-4eb32d84b5e8,DISK] 2024-11-20T22:23:45,161 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/WALs/6365a1e51efd,46811,1732141422048/6365a1e51efd%2C46811%2C1732141422048.meta.1732141425118.meta 2024-11-20T22:23:45,162 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063)] 2024-11-20T22:23:45,162 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:45,165 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T22:23:45,243 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T22:23:45,249 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-20T22:23:45,254 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T22:23:45,254 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:45,255 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T22:23:45,255 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T22:23:45,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T22:23:45,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T22:23:45,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:45,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:45,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T22:23:45,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T22:23:45,298 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:45,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:45,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T22:23:45,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T22:23:45,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:45,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T22:23:45,306 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740 2024-11-20T22:23:45,309 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740 2024-11-20T22:23:45,312 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T22:23:45,315 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T22:23:45,317 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71667228, jitterRate=0.06792491674423218}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:45,318 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T22:23:45,326 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732141425055 2024-11-20T22:23:45,338 DEBUG [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T22:23:45,339 INFO [RS_OPEN_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T22:23:45,339 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:45,342 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6365a1e51efd,46811,1732141422048, state=OPEN 2024-11-20T22:23:45,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T22:23:45,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T22:23:45,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:45,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T22:23:45,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T22:23:45,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=6365a1e51efd,46811,1732141422048 in 554 msec 2024-11-20T22:23:45,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T22:23:45,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0590 sec 2024-11-20T22:23:45,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.3480 sec 2024-11-20T22:23:45,434 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732141425433, 
completionTime=-1 2024-11-20T22:23:45,434 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T22:23:45,434 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T22:23:45,474 DEBUG [hconnection-0x2514ba89-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:45,478 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:45,498 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T22:23:45,498 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732141485498 2024-11-20T22:23:45,499 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732141545499 2024-11-20T22:23:45,499 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 64 msec 2024-11-20T22:23:45,547 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,35073,1732141420438-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:45,548 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,35073,1732141420438-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:45,548 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,35073,1732141420438-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:45,550 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6365a1e51efd:35073, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:45,551 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T22:23:45,557 DEBUG [master/6365a1e51efd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T22:23:45,561 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-20T22:23:45,564 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T22:23:45,574 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T22:23:45,579 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:23:45,581 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:45,585 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:23:45,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741835_1011 (size=358) 2024-11-20T22:23:45,648 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 523fbb796d2a39aa16176c6f447c7951, NAME => 'hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:23:45,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741836_1012 (size=42) 2024-11-20T22:23:45,709 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:45,709 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 523fbb796d2a39aa16176c6f447c7951, disabling compactions & flushes 2024-11-20T22:23:45,710 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:23:45,710 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:23:45,710 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 
after waiting 0 ms 2024-11-20T22:23:45,710 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:23:45,710 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:23:45,710 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 523fbb796d2a39aa16176c6f447c7951: 2024-11-20T22:23:45,713 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:23:45,723 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732141425714"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141425714"}]},"ts":"1732141425714"} 2024-11-20T22:23:45,751 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T22:23:45,755 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:23:45,758 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141425755"}]},"ts":"1732141425755"} 2024-11-20T22:23:45,763 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T22:23:45,779 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=523fbb796d2a39aa16176c6f447c7951, ASSIGN}] 2024-11-20T22:23:45,782 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=523fbb796d2a39aa16176c6f447c7951, ASSIGN 2024-11-20T22:23:45,785 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=523fbb796d2a39aa16176c6f447c7951, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:23:45,937 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=523fbb796d2a39aa16176c6f447c7951, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:45,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 523fbb796d2a39aa16176c6f447c7951, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:23:46,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:46,119 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:23:46,119 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 523fbb796d2a39aa16176c6f447c7951, NAME => 'hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:46,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:46,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,140 INFO [StoreOpener-523fbb796d2a39aa16176c6f447c7951-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,147 INFO [StoreOpener-523fbb796d2a39aa16176c6f447c7951-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 523fbb796d2a39aa16176c6f447c7951 columnFamilyName info 2024-11-20T22:23:46,147 DEBUG [StoreOpener-523fbb796d2a39aa16176c6f447c7951-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:46,148 INFO [StoreOpener-523fbb796d2a39aa16176c6f447c7951-1 {}] regionserver.HStore(327): Store=523fbb796d2a39aa16176c6f447c7951/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:46,150 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,151 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,158 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:23:46,168 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:46,171 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 523fbb796d2a39aa16176c6f447c7951; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61209601, jitterRate=-0.0879058688879013}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T22:23:46,173 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 523fbb796d2a39aa16176c6f447c7951: 2024-11-20T22:23:46,177 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951., pid=6, masterSystemTime=1732141426099 2024-11-20T22:23:46,182 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:23:46,183 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 
2024-11-20T22:23:46,183 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=523fbb796d2a39aa16176c6f447c7951, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:46,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T22:23:46,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 523fbb796d2a39aa16176c6f447c7951, server=6365a1e51efd,46811,1732141422048 in 246 msec 2024-11-20T22:23:46,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T22:23:46,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=523fbb796d2a39aa16176c6f447c7951, ASSIGN in 417 msec 2024-11-20T22:23:46,208 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:23:46,209 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141426208"}]},"ts":"1732141426208"} 2024-11-20T22:23:46,214 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T22:23:46,256 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:23:46,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 691 msec 2024-11-20T22:23:46,279 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T22:23:46,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T22:23:46,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:46,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:23:46,328 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T22:23:46,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T22:23:46,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 51 msec 2024-11-20T22:23:46,384 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T22:23:46,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T22:23:46,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 39 msec 2024-11-20T22:23:46,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T22:23:46,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T22:23:46,461 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.148sec 2024-11-20T22:23:46,462 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T22:23:46,463 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T22:23:46,464 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T22:23:46,465 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T22:23:46,465 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T22:23:46,466 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,35073,1732141420438-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T22:23:46,466 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,35073,1732141420438-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T22:23:46,472 DEBUG [master/6365a1e51efd:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T22:23:46,473 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T22:23:46,474 INFO [master/6365a1e51efd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6365a1e51efd,35073,1732141420438-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T22:23:46,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d6cd448 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28474c50 2024-11-20T22:23:46,566 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T22:23:46,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68979b71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:46,588 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T22:23:46,588 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T22:23:46,601 DEBUG [hconnection-0x710d881f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:46,608 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:46,617 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=6365a1e51efd,35073,1732141420438 2024-11-20T22:23:46,630 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=220, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=710, ProcessCount=11, AvailableMemoryMB=2578 2024-11-20T22:23:46,640 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:23:46,647 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:23:46,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T22:23:46,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:23:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:23:46,668 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:23:46,668 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:46,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T22:23:46,671 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:23:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741837_1013 (size=963) 2024-11-20T22:23:46,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:46,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:47,088 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:23:47,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741838_1014 (size=53) 2024-11-20T22:23:47,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:47,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 175bc25ef8aacc6207ddcddcc7da4d90, disabling compactions & flushes 2024-11-20T22:23:47,101 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:47,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:47,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. after waiting 0 ms 2024-11-20T22:23:47,102 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:47,102 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:47,102 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:47,105 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:23:47,106 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141427105"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141427105"}]},"ts":"1732141427105"} 2024-11-20T22:23:47,109 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-20T22:23:47,111 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:23:47,111 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141427111"}]},"ts":"1732141427111"} 2024-11-20T22:23:47,115 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:23:47,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, ASSIGN}] 2024-11-20T22:23:47,140 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, ASSIGN 2024-11-20T22:23:47,143 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:23:47,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:47,294 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=175bc25ef8aacc6207ddcddcc7da4d90, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:47,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:23:47,451 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:47,457 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:47,457 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:23:47,458 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,458 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:23:47,458 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,458 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,461 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,464 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:47,464 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 175bc25ef8aacc6207ddcddcc7da4d90 columnFamilyName A 2024-11-20T22:23:47,464 DEBUG [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:47,466 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.HStore(327): Store=175bc25ef8aacc6207ddcddcc7da4d90/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:47,466 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,468 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:47,468 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 175bc25ef8aacc6207ddcddcc7da4d90 columnFamilyName B 2024-11-20T22:23:47,469 DEBUG [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:47,469 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.HStore(327): Store=175bc25ef8aacc6207ddcddcc7da4d90/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:47,470 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,472 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:23:47,472 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 175bc25ef8aacc6207ddcddcc7da4d90 columnFamilyName C 2024-11-20T22:23:47,472 DEBUG [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:23:47,473 INFO [StoreOpener-175bc25ef8aacc6207ddcddcc7da4d90-1 {}] regionserver.HStore(327): Store=175bc25ef8aacc6207ddcddcc7da4d90/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:23:47,474 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:47,475 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,476 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,479 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:23:47,483 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:47,486 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:23:47,487 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 175bc25ef8aacc6207ddcddcc7da4d90; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72864813, jitterRate=0.08577032387256622}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:23:47,489 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:47,491 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., pid=11, masterSystemTime=1732141427451 2024-11-20T22:23:47,494 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:47,495 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:47,496 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=175bc25ef8aacc6207ddcddcc7da4d90, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:47,497 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35073 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=6365a1e51efd,46811,1732141422048, table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-20T22:23:47,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T22:23:47,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 in 202 msec 2024-11-20T22:23:47,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T22:23:47,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, ASSIGN in 366 msec 2024-11-20T22:23:47,509 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:23:47,509 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141427509"}]},"ts":"1732141427509"} 2024-11-20T22:23:47,511 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:23:47,521 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:23:47,524 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 859 msec 2024-11-20T22:23:47,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T22:23:47,795 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T22:23:47,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78f1301b to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6810105b 2024-11-20T22:23:47,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c8ffb5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,819 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:47,831 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:47996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:47,844 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:23:47,849 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:23:47,861 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73a92982 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77b2a080 2024-11-20T22:23:47,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@356518ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,910 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4adb5511 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@280f5a6a 2024-11-20T22:23:47,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@713e47c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,924 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x16defd30 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18248649 2024-11-20T22:23:47,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b278282, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,939 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d1a674a to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6130b46e 2024-11-20T22:23:47,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154fc7c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,954 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b0bb6c to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2dad5d1 2024-11-20T22:23:47,962 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69c02bcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,965 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x74feaf47 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d0bfce9 2024-11-20T22:23:47,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21896651, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,979 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72105e62 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8e087d 2024-11-20T22:23:47,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6adf5dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:47,989 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x33a38638 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2382745a 2024-11-20T22:23:48,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7890f9e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:48,004 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x619ba81c to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@518b20d1 2024-11-20T22:23:48,020 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eab84f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:23:48,037 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:48,039 DEBUG [hconnection-0x1c8a5105-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,041 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,043 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T22:23:48,046 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:48,049 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:48,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:48,055 DEBUG [hconnection-0x7da3ac9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:48,057 DEBUG [hconnection-0x62bc4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,057 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,059 DEBUG [hconnection-0x79bb3058-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,063 DEBUG [hconnection-0x2522abb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,071 DEBUG [hconnection-0x4c21a880-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,072 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,072 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40214, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,073 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,078 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,083 DEBUG [hconnection-0x72891238-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,089 DEBUG [hconnection-0x14eb5fc7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:23:48,093 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,134 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:23:48,143 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T22:23:48,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:48,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:23:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:48,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:48,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:48,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/7f13db094ee9405496c31b5eb6a85f11 is 50, key is test_row_0/A:col10/1732141428175/Put/seqid=0 2024-11-20T22:23:48,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:48,415 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:48,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
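The flush being retried above is the table-level flush requested at 22:23:48,037 ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", stored as FlushTableProcedure pid=12 with FlushRegionProcedure subprocedure pid=13). The repeated "Checking to see if procedure is done pid=12" lines are the client polling the master for completion, and the FlushRegionCallable failures are the region server declining the remote flush because MemStoreFlusher.0 is already flushing the region. A minimal client-side sketch of the kind of admin call that issues such a request follows; the connection setup is illustrative and not taken from the test code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: request a flush of the TestAcidGuarantees table through the admin API.
    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes an hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Waits for the master-side flush procedure to finish; while it runs, the
          // client keeps asking the master whether the procedure is done.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }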
2024-11-20T22:23:48,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141488408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141488408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741839_1015 (size=9657) 2024-11-20T22:23:48,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141488433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141488423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141488461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/7f13db094ee9405496c31b5eb6a85f11 2024-11-20T22:23:48,577 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/d29e9839c8bf4c148e69c37fe429308e is 50, key is test_row_0/B:col10/1732141428175/Put/seqid=0 2024-11-20T22:23:48,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141488603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141488604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141488606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141488611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141488612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741840_1016 (size=9657) 2024-11-20T22:23:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:48,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:48,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:48,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141488820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141488821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141488820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141488825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:48,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141488835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,918 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:48,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:48,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:48,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:48,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:48,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:48,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:49,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/d29e9839c8bf4c148e69c37fe429308e 2024-11-20T22:23:49,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:49,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:49,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:49,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:49,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:49,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:49,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:49,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/bf620aa465de42d7839018a0e9565789 is 50, key is test_row_0/C:col10/1732141428175/Put/seqid=0 2024-11-20T22:23:49,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141489129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141489141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141489143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141489151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141489142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741841_1017 (size=9657) 2024-11-20T22:23:49,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/bf620aa465de42d7839018a0e9565789 2024-11-20T22:23:49,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/7f13db094ee9405496c31b5eb6a85f11 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7f13db094ee9405496c31b5eb6a85f11 2024-11-20T22:23:49,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7f13db094ee9405496c31b5eb6a85f11, entries=100, sequenceid=13, filesize=9.4 K 2024-11-20T22:23:49,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/d29e9839c8bf4c148e69c37fe429308e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e 2024-11-20T22:23:49,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e, entries=100, sequenceid=13, filesize=9.4 K 2024-11-20T22:23:49,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/bf620aa465de42d7839018a0e9565789 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bf620aa465de42d7839018a0e9565789 2024-11-20T22:23:49,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:49,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:49,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:49,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:49,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:49,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:49,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:49,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bf620aa465de42d7839018a0e9565789, entries=100, sequenceid=13, filesize=9.4 K 2024-11-20T22:23:49,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1073ms, sequenceid=13, compaction requested=false 2024-11-20T22:23:49,263 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:23:49,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:49,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T22:23:49,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:49,409 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:23:49,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:49,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:49,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:49,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:49,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:49,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:49,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/da765452131a476088017c633a9e39bd is 50, key is test_row_0/A:col10/1732141428414/Put/seqid=0 2024-11-20T22:23:49,533 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741842_1018 (size=12001) 2024-11-20T22:23:49,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:49,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:49,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141489685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141489686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141489697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141489701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141489704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141489805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141489816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141489820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141489822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:49,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141489826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:49,939 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/da765452131a476088017c633a9e39bd 2024-11-20T22:23:50,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e6b7f2d7780c4cddbcf6d765f4e00a2c is 50, key is test_row_0/B:col10/1732141428414/Put/seqid=0 2024-11-20T22:23:50,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141490015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141490033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141490038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141490040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141490055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741843_1019 (size=12001) 2024-11-20T22:23:50,068 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e6b7f2d7780c4cddbcf6d765f4e00a2c 2024-11-20T22:23:50,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/4a5c86b0096c4bf1ab03bada44a85384 is 50, key is test_row_0/C:col10/1732141428414/Put/seqid=0 2024-11-20T22:23:50,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741844_1020 (size=12001) 2024-11-20T22:23:50,142 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/4a5c86b0096c4bf1ab03bada44a85384 2024-11-20T22:23:50,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/da765452131a476088017c633a9e39bd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/da765452131a476088017c633a9e39bd 2024-11-20T22:23:50,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:50,184 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/da765452131a476088017c633a9e39bd, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:23:50,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e6b7f2d7780c4cddbcf6d765f4e00a2c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e6b7f2d7780c4cddbcf6d765f4e00a2c 2024-11-20T22:23:50,222 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e6b7f2d7780c4cddbcf6d765f4e00a2c, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:23:50,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/4a5c86b0096c4bf1ab03bada44a85384 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/4a5c86b0096c4bf1ab03bada44a85384 2024-11-20T22:23:50,247 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/4a5c86b0096c4bf1ab03bada44a85384, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:23:50,257 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 175bc25ef8aacc6207ddcddcc7da4d90 in 848ms, sequenceid=38, compaction requested=false 2024-11-20T22:23:50,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:50,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:50,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T22:23:50,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T22:23:50,278 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T22:23:50,278 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2090 sec 2024-11-20T22:23:50,307 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.2560 sec 2024-11-20T22:23:50,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:50,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:23:50,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:50,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:50,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/8933cf7a71c3427296ba71bb453e034a is 50, key is test_row_0/A:col10/1732141429668/Put/seqid=0 2024-11-20T22:23:50,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741845_1021 (size=14341) 2024-11-20T22:23:50,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141490455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141490456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141490493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141490495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141490500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141490599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141490600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141490606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141490609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141490609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:23:50,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141490807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141490810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141490814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:50,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141490822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141490822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:50,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/8933cf7a71c3427296ba71bb453e034a 2024-11-20T22:23:50,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/c68d325e692b4284906b0423abaf28e6 is 50, key is test_row_0/B:col10/1732141429668/Put/seqid=0 2024-11-20T22:23:50,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741846_1022 (size=12001) 2024-11-20T22:23:50,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/c68d325e692b4284906b0423abaf28e6 2024-11-20T22:23:50,957 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T22:23:50,960 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T22:23:50,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d0399cc895474e96b560125f08aa8c49 is 50, key is test_row_0/C:col10/1732141429668/Put/seqid=0 2024-11-20T22:23:50,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741847_1023 (size=12001) 2024-11-20T22:23:51,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141491114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141491118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141491127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141491130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141491131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,311 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T22:23:51,312 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:51,335 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T22:23:51,335 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:51,338 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T22:23:51,338 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T22:23:51,339 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T22:23:51,339 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:51,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T22:23:51,341 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T22:23:51,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d0399cc895474e96b560125f08aa8c49 2024-11-20T22:23:51,414 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/8933cf7a71c3427296ba71bb453e034a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8933cf7a71c3427296ba71bb453e034a 2024-11-20T22:23:51,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8933cf7a71c3427296ba71bb453e034a, entries=200, sequenceid=51, filesize=14.0 K 2024-11-20T22:23:51,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/c68d325e692b4284906b0423abaf28e6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/c68d325e692b4284906b0423abaf28e6 2024-11-20T22:23:51,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/c68d325e692b4284906b0423abaf28e6, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:23:51,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d0399cc895474e96b560125f08aa8c49 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d0399cc895474e96b560125f08aa8c49 2024-11-20T22:23:51,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d0399cc895474e96b560125f08aa8c49, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T22:23:51,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1183ms, sequenceid=51, compaction requested=true 2024-11-20T22:23:51,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:51,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:51,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:51,531 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, 
priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:51,536 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:51,536 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:51,538 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:23:51,538 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:51,539 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e6b7f2d7780c4cddbcf6d765f4e00a2c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/c68d325e692b4284906b0423abaf28e6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=32.9 K 2024-11-20T22:23:51,539 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:51,540 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:23:51,540 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:51,540 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7f13db094ee9405496c31b5eb6a85f11, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/da765452131a476088017c633a9e39bd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8933cf7a71c3427296ba71bb453e034a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=35.2 K 2024-11-20T22:23:51,540 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d29e9839c8bf4c148e69c37fe429308e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141428166 2024-11-20T22:23:51,541 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e6b7f2d7780c4cddbcf6d765f4e00a2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141428385 2024-11-20T22:23:51,541 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f13db094ee9405496c31b5eb6a85f11, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141428166 2024-11-20T22:23:51,543 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c68d325e692b4284906b0423abaf28e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141429668 2024-11-20T22:23:51,543 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting da765452131a476088017c633a9e39bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141428385 2024-11-20T22:23:51,546 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8933cf7a71c3427296ba71bb453e034a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141429668 2024-11-20T22:23:51,625 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#9 average throughput is 0.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:51,627 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/8f2e65538875446db7923b937efb8cfc is 50, key is test_row_0/B:col10/1732141429668/Put/seqid=0 2024-11-20T22:23:51,633 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#10 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:51,641 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/85e1e46af61048c2b171b39c038a9cb8 is 50, key is test_row_0/A:col10/1732141429668/Put/seqid=0 2024-11-20T22:23:51,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:51,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:23:51,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:51,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:51,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:51,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:51,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:51,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:51,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741848_1024 (size=12104) 2024-11-20T22:23:51,672 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/8f2e65538875446db7923b937efb8cfc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8f2e65538875446db7923b937efb8cfc 2024-11-20T22:23:51,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741849_1025 (size=12104) 2024-11-20T22:23:51,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/5237bffaf1ff482b8b847b9f25df87f2 is 50, key is test_row_0/A:col10/1732141430486/Put/seqid=0 2024-11-20T22:23:51,701 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/85e1e46af61048c2b171b39c038a9cb8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/85e1e46af61048c2b171b39c038a9cb8 2024-11-20T22:23:51,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141491670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,712 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 8f2e65538875446db7923b937efb8cfc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:51,713 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:51,713 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=13, startTime=1732141431531; duration=0sec 2024-11-20T22:23:51,714 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:51,714 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:23:51,714 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:51,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141491676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141491678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141491679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,721 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:51,721 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:23:51,721 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:51,723 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bf620aa465de42d7839018a0e9565789, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/4a5c86b0096c4bf1ab03bada44a85384, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d0399cc895474e96b560125f08aa8c49] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=32.9 K 2024-11-20T22:23:51,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141491712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,724 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into 85e1e46af61048c2b171b39c038a9cb8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:51,724 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:51,724 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=13, startTime=1732141431527; duration=0sec 2024-11-20T22:23:51,725 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:51,725 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:23:51,725 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bf620aa465de42d7839018a0e9565789, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141428166 2024-11-20T22:23:51,727 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a5c86b0096c4bf1ab03bada44a85384, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141428385 2024-11-20T22:23:51,729 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d0399cc895474e96b560125f08aa8c49, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141429668 2024-11-20T22:23:51,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741850_1026 (size=14341) 2024-11-20T22:23:51,781 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#12 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:51,782 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ecbe8d3a0c56418383b1640d87f8e644 is 50, key is test_row_0/C:col10/1732141429668/Put/seqid=0 2024-11-20T22:23:51,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741851_1027 (size=12104) 2024-11-20T22:23:51,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141491816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141491827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141491827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141491827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141491827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:51,850 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ecbe8d3a0c56418383b1640d87f8e644 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ecbe8d3a0c56418383b1640d87f8e644 2024-11-20T22:23:51,866 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into ecbe8d3a0c56418383b1640d87f8e644(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:51,866 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:51,867 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=13, startTime=1732141431532; duration=0sec 2024-11-20T22:23:51,867 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:51,867 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:23:52,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141492037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141492037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141492047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141492046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141492047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/5237bffaf1ff482b8b847b9f25df87f2 2024-11-20T22:23:52,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T22:23:52,181 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T22:23:52,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/bd6682dbbb514251bd51a5b7d48c4686 is 50, key is test_row_0/B:col10/1732141430486/Put/seqid=0 2024-11-20T22:23:52,188 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T22:23:52,192 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:52,196 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:52,196 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:52,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741852_1028 (size=12001) 
2024-11-20T22:23:52,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/bd6682dbbb514251bd51a5b7d48c4686 2024-11-20T22:23:52,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/890a3fad22c64d4ca39c45fc26e30508 is 50, key is test_row_0/C:col10/1732141430486/Put/seqid=0 2024-11-20T22:23:52,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741853_1029 (size=12001) 2024-11-20T22:23:52,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/890a3fad22c64d4ca39c45fc26e30508 2024-11-20T22:23:52,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141492345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141492346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,353 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T22:23:52,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:52,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:52,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:52,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:52,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:52,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:52,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141492357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141492358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/5237bffaf1ff482b8b847b9f25df87f2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5237bffaf1ff482b8b847b9f25df87f2 2024-11-20T22:23:52,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141492358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5237bffaf1ff482b8b847b9f25df87f2, entries=200, sequenceid=76, filesize=14.0 K 2024-11-20T22:23:52,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/bd6682dbbb514251bd51a5b7d48c4686 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/bd6682dbbb514251bd51a5b7d48c4686 2024-11-20T22:23:52,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/bd6682dbbb514251bd51a5b7d48c4686, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T22:23:52,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/890a3fad22c64d4ca39c45fc26e30508 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/890a3fad22c64d4ca39c45fc26e30508 2024-11-20T22:23:52,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/890a3fad22c64d4ca39c45fc26e30508, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T22:23:52,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 175bc25ef8aacc6207ddcddcc7da4d90 in 784ms, sequenceid=76, compaction requested=false 2024-11-20T22:23:52,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:52,509 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T22:23:52,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:52,510 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:23:52,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:52,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:52,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:52,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:52,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:52,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:52,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/b46dae7e3f5845349b544c986ffe4d19 is 50, key is test_row_0/A:col10/1732141431669/Put/seqid=0 2024-11-20T22:23:52,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741854_1030 
(size=12001) 2024-11-20T22:23:52,564 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/b46dae7e3f5845349b544c986ffe4d19 2024-11-20T22:23:52,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e5cdd33944f84645ade7b4ea48cc33ad is 50, key is test_row_0/B:col10/1732141431669/Put/seqid=0 2024-11-20T22:23:52,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741855_1031 (size=12001) 2024-11-20T22:23:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:52,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:52,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141492922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141492927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141492930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141492935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:52,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:52,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141492935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141493044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141493044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141493044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141493044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,055 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e5cdd33944f84645ade7b4ea48cc33ad 2024-11-20T22:23:53,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141493056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d7e3dc9ba0614e9392757d68dbf12750 is 50, key is test_row_0/C:col10/1732141431669/Put/seqid=0 2024-11-20T22:23:53,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741856_1032 (size=12001) 2024-11-20T22:23:53,147 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d7e3dc9ba0614e9392757d68dbf12750 2024-11-20T22:23:53,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/b46dae7e3f5845349b544c986ffe4d19 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/b46dae7e3f5845349b544c986ffe4d19 2024-11-20T22:23:53,182 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/b46dae7e3f5845349b544c986ffe4d19, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:23:53,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e5cdd33944f84645ade7b4ea48cc33ad as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5cdd33944f84645ade7b4ea48cc33ad 2024-11-20T22:23:53,199 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5cdd33944f84645ade7b4ea48cc33ad, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:23:53,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d7e3dc9ba0614e9392757d68dbf12750 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d7e3dc9ba0614e9392757d68dbf12750 2024-11-20T22:23:53,221 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d7e3dc9ba0614e9392757d68dbf12750, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:23:53,223 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 175bc25ef8aacc6207ddcddcc7da4d90 in 713ms, sequenceid=90, compaction requested=true 2024-11-20T22:23:53,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:53,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:53,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T22:23:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T22:23:53,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T22:23:53,230 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0300 sec 2024-11-20T22:23:53,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.0430 sec 2024-11-20T22:23:53,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:53,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:23:53,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:53,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:53,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/a941ca588f414160a4172b76b1893c8a is 50, key is test_row_0/A:col10/1732141433258/Put/seqid=0 2024-11-20T22:23:53,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141493283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141493287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141493289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141493291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T22:23:53,300 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T22:23:53,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:53,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141493285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T22:23:53,306 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:53,308 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:53,308 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:53,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:53,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741857_1033 (size=14341) 2024-11-20T22:23:53,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/a941ca588f414160a4172b76b1893c8a 2024-11-20T22:23:53,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/ae3b86f957a34f82b6d900df4c89aafc is 50, key is test_row_0/B:col10/1732141433258/Put/seqid=0 2024-11-20T22:23:53,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741858_1034 (size=12001) 2024-11-20T22:23:53,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/ae3b86f957a34f82b6d900df4c89aafc 2024-11-20T22:23:53,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141493398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141493399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141493400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141493401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141493405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:53,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/de6b5fe3ca684568b099f905795d8da5 is 50, key is test_row_0/C:col10/1732141433258/Put/seqid=0 2024-11-20T22:23:53,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741859_1035 (size=12001) 2024-11-20T22:23:53,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:53,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:53,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:53,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:53,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:53,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:53,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/de6b5fe3ca684568b099f905795d8da5 2024-11-20T22:23:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/a941ca588f414160a4172b76b1893c8a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a941ca588f414160a4172b76b1893c8a 2024-11-20T22:23:53,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a941ca588f414160a4172b76b1893c8a, entries=200, sequenceid=119, filesize=14.0 K 2024-11-20T22:23:53,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/ae3b86f957a34f82b6d900df4c89aafc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/ae3b86f957a34f82b6d900df4c89aafc 2024-11-20T22:23:53,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/ae3b86f957a34f82b6d900df4c89aafc, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T22:23:53,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/de6b5fe3ca684568b099f905795d8da5 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/de6b5fe3ca684568b099f905795d8da5 2024-11-20T22:23:53,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/de6b5fe3ca684568b099f905795d8da5, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T22:23:53,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=40.25 KB/41220 for 175bc25ef8aacc6207ddcddcc7da4d90 in 280ms, sequenceid=119, compaction requested=true 2024-11-20T22:23:53,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:53,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:53,543 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:53,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:53,543 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:53,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:53,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:53,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:53,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:53,549 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:53,549 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:53,549 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:23:53,549 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:23:53,549 INFO 
[RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:53,549 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:53,549 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8f2e65538875446db7923b937efb8cfc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/bd6682dbbb514251bd51a5b7d48c4686, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5cdd33944f84645ade7b4ea48cc33ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/ae3b86f957a34f82b6d900df4c89aafc] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=47.0 K 2024-11-20T22:23:53,549 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/85e1e46af61048c2b171b39c038a9cb8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5237bffaf1ff482b8b847b9f25df87f2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/b46dae7e3f5845349b544c986ffe4d19, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a941ca588f414160a4172b76b1893c8a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=51.5 K 2024-11-20T22:23:53,551 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85e1e46af61048c2b171b39c038a9cb8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141429668 2024-11-20T22:23:53,551 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f2e65538875446db7923b937efb8cfc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141429668 2024-11-20T22:23:53,552 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bd6682dbbb514251bd51a5b7d48c4686, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732141430463 2024-11-20T22:23:53,553 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5237bffaf1ff482b8b847b9f25df87f2, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, 
seqNum=76, earliestPutTs=1732141430446 2024-11-20T22:23:53,553 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b46dae7e3f5845349b544c986ffe4d19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141431655 2024-11-20T22:23:53,554 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e5cdd33944f84645ade7b4ea48cc33ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141431655 2024-11-20T22:23:53,555 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a941ca588f414160a4172b76b1893c8a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732141432926 2024-11-20T22:23:53,555 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ae3b86f957a34f82b6d900df4c89aafc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732141433258 2024-11-20T22:23:53,594 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#21 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:53,596 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/b562521754094d0b9f59f71451d031ff is 50, key is test_row_0/B:col10/1732141433258/Put/seqid=0 2024-11-20T22:23:53,610 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:53,614 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/59c35ef445fc41b784f28f5817e41e8b is 50, key is test_row_0/A:col10/1732141433258/Put/seqid=0 2024-11-20T22:23:53,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:53,623 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T22:23:53,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
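For context on the pid=16/17 traffic above: the master keeps polling whether the flush procedure is done while re-dispatching FlushRegionCallable to the region server, which keeps refusing because the region is already flushing. A minimal, hypothetical sketch of how such a table flush is requested through the public Admin API (assuming a reachable cluster configuration on the classpath; the table name is taken from this log) might look like:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: asks for a flush of every region of the table, which the
// master turns into the kind of flush procedure (pid=16/17) visible above.
public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}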
2024-11-20T22:23:53,625 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:23:53,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:53,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741860_1036 (size=12241) 2024-11-20T22:23:53,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:53,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:53,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:53,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:53,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:53,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:53,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741861_1037 (size=12241) 2024-11-20T22:23:53,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/f04230df08fd42928c6b4787be909d97 is 50, key is test_row_0/A:col10/1732141433282/Put/seqid=0 2024-11-20T22:23:53,672 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/59c35ef445fc41b784f28f5817e41e8b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/59c35ef445fc41b784f28f5817e41e8b 2024-11-20T22:23:53,689 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into 59c35ef445fc41b784f28f5817e41e8b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
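The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines reflect ratio-based selection: a candidate set is acceptable when every file is no larger than the configured ratio times the combined size of the other files in the set. The snippet below is a deliberately simplified, self-contained illustration of that ratio test only; it is not the real ExploringCompactionPolicy, and the 1.2 ratio and byte sizes are assumptions loosely derived from the A-store file sizes reported above.

import java.util.List;

// Simplified illustration of the ratio check used when selecting store files
// for a minor compaction: a set passes when each file is no bigger than
// ratio * (sum of the other files). Not the actual HBase implementation.
public final class RatioCheckSketch {
  static boolean inRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Byte counts are made up to roughly match the reported 11.8 K / 14.0 K /
    // 11.7 K / 14.0 K store files.
    List<Long> sizes = List.of(12_083L, 14_336L, 11_980L, 14_336L);
    System.out.println("selected: " + inRatio(sizes, 1.2)); // prints "selected: true"
  }
}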
2024-11-20T22:23:53,689 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:53,690 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=12, startTime=1732141433542; duration=0sec 2024-11-20T22:23:53,690 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:53,690 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:23:53,690 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:23:53,694 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:23:53,694 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:23:53,695 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:53,696 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ecbe8d3a0c56418383b1640d87f8e644, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/890a3fad22c64d4ca39c45fc26e30508, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d7e3dc9ba0614e9392757d68dbf12750, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/de6b5fe3ca684568b099f905795d8da5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=47.0 K 2024-11-20T22:23:53,697 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecbe8d3a0c56418383b1640d87f8e644, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141429668 2024-11-20T22:23:53,698 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 890a3fad22c64d4ca39c45fc26e30508, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732141430463 2024-11-20T22:23:53,699 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7e3dc9ba0614e9392757d68dbf12750, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141431655 2024-11-20T22:23:53,701 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting de6b5fe3ca684568b099f905795d8da5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732141433258 2024-11-20T22:23:53,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741862_1038 (size=14441) 2024-11-20T22:23:53,708 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/f04230df08fd42928c6b4787be909d97 2024-11-20T22:23:53,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/3f228cb1012b4db18b80ced8265d6c2b is 50, key is test_row_0/B:col10/1732141433282/Put/seqid=0 2024-11-20T22:23:53,747 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#25 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:53,748 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/2b179690856f4b099d1cf8e0c45b6038 is 50, key is test_row_0/C:col10/1732141433258/Put/seqid=0 2024-11-20T22:23:53,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141493738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141493738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141493741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141493742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141493742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741863_1039 (size=12051) 2024-11-20T22:23:53,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741864_1040 (size=12241) 2024-11-20T22:23:53,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141493902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141493902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141493906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141493906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:53,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141493906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:53,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:54,055 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/b562521754094d0b9f59f71451d031ff as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b562521754094d0b9f59f71451d031ff 2024-11-20T22:23:54,092 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into b562521754094d0b9f59f71451d031ff(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:54,092 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:54,092 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=12, startTime=1732141433543; duration=0sec 2024-11-20T22:23:54,093 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:54,093 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:23:54,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141494110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141494110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141494110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141494110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141494111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,208 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/3f228cb1012b4db18b80ced8265d6c2b 2024-11-20T22:23:54,219 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/2b179690856f4b099d1cf8e0c45b6038 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2b179690856f4b099d1cf8e0c45b6038 2024-11-20T22:23:54,244 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into 2b179690856f4b099d1cf8e0c45b6038(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
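The RegionTooBusyException bursts above all originate in HRegion.checkResources(): once the region's memstore passes its blocking limit (normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the 512.0 K figure here points to a deliberately small, test-scaled flush size), new mutations are rejected until the in-flight flush drains it. The stock client usually retries this exception on its own; the loop below is only a minimal sketch of that handling, assuming the HBase 2.x client API, the table and family names from this log, a hypothetical value payload, and an arbitrary retry budget.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              // Rejected server-side with RegionTooBusyException while the
              // memstore is over its blocking limit, as in the log above.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (++attempts > 5) throw e;     // arbitrary retry budget for the sketch
              Thread.sleep(100L * attempts);   // simple backoff while the flush drains the memstore
            }
          }
        }
      }
    }

In a real client the pause and retry count would come from hbase.client.pause and hbase.client.retries.number rather than being hard-coded.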
2024-11-20T22:23:54,244 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:54,245 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=12, startTime=1732141433546; duration=0sec 2024-11-20T22:23:54,245 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:54,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:23:54,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/bcb23bb7d9544120aaa78472f981e1c8 is 50, key is test_row_0/C:col10/1732141433282/Put/seqid=0 2024-11-20T22:23:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741865_1041 (size=12051) 2024-11-20T22:23:54,337 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/bcb23bb7d9544120aaa78472f981e1c8 2024-11-20T22:23:54,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/f04230df08fd42928c6b4787be909d97 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/f04230df08fd42928c6b4787be909d97 2024-11-20T22:23:54,371 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/f04230df08fd42928c6b4787be909d97, entries=200, sequenceid=132, filesize=14.1 K 2024-11-20T22:23:54,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/3f228cb1012b4db18b80ced8265d6c2b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/3f228cb1012b4db18b80ced8265d6c2b 2024-11-20T22:23:54,408 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/3f228cb1012b4db18b80ced8265d6c2b, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T22:23:54,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/bcb23bb7d9544120aaa78472f981e1c8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bcb23bb7d9544120aaa78472f981e1c8 2024-11-20T22:23:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:54,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141494418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,433 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bcb23bb7d9544120aaa78472f981e1c8, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T22:23:54,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141494423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141494423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,459 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 175bc25ef8aacc6207ddcddcc7da4d90 in 813ms, sequenceid=132, compaction requested=false 2024-11-20T22:23:54,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:54,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:54,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T22:23:54,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:23:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:54,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T22:23:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:54,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:54,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T22:23:54,471 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1560 sec 2024-11-20T22:23:54,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/fcd02affc1c84f1285f82c99e85928a5 is 50, key is test_row_0/A:col10/1732141433735/Put/seqid=0 2024-11-20T22:23:54,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.1770 sec 2024-11-20T22:23:54,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141494519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741866_1042 (size=16931) 2024-11-20T22:23:54,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141494531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141494632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141494640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141494850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141494865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/fcd02affc1c84f1285f82c99e85928a5 2024-11-20T22:23:54,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141494937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:54,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141494942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141494939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:54,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/09c63b1b608e4e85998d990e2944ccbc is 50, key is test_row_0/B:col10/1732141433735/Put/seqid=0 2024-11-20T22:23:55,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741867_1043 (size=12151) 2024-11-20T22:23:55,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/09c63b1b608e4e85998d990e2944ccbc 2024-11-20T22:23:55,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/553c8d84d2bc4c30adea3458906d6cfd is 50, key is test_row_0/C:col10/1732141433735/Put/seqid=0 2024-11-20T22:23:55,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741868_1044 (size=12151) 2024-11-20T22:23:55,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141495176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141495177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T22:23:55,423 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T22:23:55,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:55,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T22:23:55,430 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:55,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T22:23:55,435 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:55,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:55,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/553c8d84d2bc4c30adea3458906d6cfd 2024-11-20T22:23:55,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/fcd02affc1c84f1285f82c99e85928a5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/fcd02affc1c84f1285f82c99e85928a5 2024-11-20T22:23:55,540 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T22:23:55,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/fcd02affc1c84f1285f82c99e85928a5, entries=250, sequenceid=163, filesize=16.5 K 2024-11-20T22:23:55,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/09c63b1b608e4e85998d990e2944ccbc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/09c63b1b608e4e85998d990e2944ccbc 2024-11-20T22:23:55,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/09c63b1b608e4e85998d990e2944ccbc, entries=150, sequenceid=163, filesize=11.9 K 2024-11-20T22:23:55,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/553c8d84d2bc4c30adea3458906d6cfd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/553c8d84d2bc4c30adea3458906d6cfd 2024-11-20T22:23:55,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/553c8d84d2bc4c30adea3458906d6cfd, entries=150, sequenceid=163, filesize=11.9 K 2024-11-20T22:23:55,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1130ms, sequenceid=163, compaction requested=true 2024-11-20T22:23:55,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:55,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:55,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:55,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:55,593 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:55,593 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:55,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:55,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:23:55,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:55,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T22:23:55,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:55,595 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:23:55,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:55,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:55,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:55,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:55,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:55,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:55,602 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:55,602 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:23:55,602 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
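The FlushTableProcedure / FlushRegionProcedure pairs in this stretch (pid=16/17 earlier, pid=18/19 here) are the master-side handling of the client request logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". From the client it is a single Admin call; a minimal sketch, assuming the HBase 2.x Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a FlushTableProcedure, which fans out
          // FlushRegionProcedure subprocedures to the region servers
          // (the pid=16/17 and pid=18/19 pairs seen in this log).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The client waits on the procedure future (the "procId: 16 completed" line above), while the per-region flush itself runs on the region server and interleaves with the ongoing writes.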
2024-11-20T22:23:55,603 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43613 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:55,603 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:23:55,603 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:55,604 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/59c35ef445fc41b784f28f5817e41e8b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/f04230df08fd42928c6b4787be909d97, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/fcd02affc1c84f1285f82c99e85928a5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=42.6 K 2024-11-20T22:23:55,604 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b562521754094d0b9f59f71451d031ff, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/3f228cb1012b4db18b80ced8265d6c2b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/09c63b1b608e4e85998d990e2944ccbc] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=35.6 K 2024-11-20T22:23:55,605 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59c35ef445fc41b784f28f5817e41e8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732141433258 2024-11-20T22:23:55,606 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b562521754094d0b9f59f71451d031ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732141433258 2024-11-20T22:23:55,607 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f228cb1012b4db18b80ced8265d6c2b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141433282 2024-11-20T22:23:55,608 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f04230df08fd42928c6b4787be909d97, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141433282 2024-11-20T22:23:55,608 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 09c63b1b608e4e85998d990e2944ccbc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141433735 2024-11-20T22:23:55,610 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcd02affc1c84f1285f82c99e85928a5, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141433735 2024-11-20T22:23:55,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd8a797cc6234d2a911a16d4d013751d is 50, key is test_row_0/A:col10/1732141434498/Put/seqid=0 2024-11-20T22:23:55,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741869_1045 (size=9757) 2024-11-20T22:23:55,673 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd8a797cc6234d2a911a16d4d013751d 2024-11-20T22:23:55,705 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#31 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:55,706 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/7806b20ac1714a228c6e427d3009517f is 50, key is test_row_0/A:col10/1732141433735/Put/seqid=0 2024-11-20T22:23:55,727 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#32 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:55,728 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/916c11e51a6b4c7f89392c3245f8b312 is 50, key is test_row_0/B:col10/1732141433735/Put/seqid=0 2024-11-20T22:23:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T22:23:55,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/6fc75abbab1f4b45b772226d0b9f6f8c is 50, key is test_row_0/B:col10/1732141434498/Put/seqid=0 2024-11-20T22:23:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:55,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:55,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741871_1047 (size=12493) 2024-11-20T22:23:55,828 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/916c11e51a6b4c7f89392c3245f8b312 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/916c11e51a6b4c7f89392c3245f8b312 2024-11-20T22:23:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741870_1046 (size=12493) 2024-11-20T22:23:55,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741872_1048 (size=9757) 2024-11-20T22:23:55,847 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 916c11e51a6b4c7f89392c3245f8b312(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
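Both compactions in this stretch were selected automatically once three store files had accumulated, and their write rate is capped by the PressureAwareThroughputController ("total limit is 50.00 MB/second" above). Compactions can also be requested and watched from the client; a short sketch, assuming the HBase 2.x Admin API, with an illustrative polling interval:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactAndWaitExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(table);   // only queues the request; it returns immediately
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);         // poll until the CompactSplit queues drain
          }
        }
      }
    }

Like the system-requested compactions logged here, the queued request is executed by CompactSplit on the region server, subject to the same throughput controller.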
2024-11-20T22:23:55,848 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:55,848 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=13, startTime=1732141435592; duration=0sec 2024-11-20T22:23:55,848 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:55,848 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:23:55,849 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/6fc75abbab1f4b45b772226d0b9f6f8c 2024-11-20T22:23:55,849 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:55,874 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:55,874 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/7806b20ac1714a228c6e427d3009517f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7806b20ac1714a228c6e427d3009517f 2024-11-20T22:23:55,874 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:23:55,875 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:55,886 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2b179690856f4b099d1cf8e0c45b6038, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bcb23bb7d9544120aaa78472f981e1c8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/553c8d84d2bc4c30adea3458906d6cfd] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=35.6 K 2024-11-20T22:23:55,888 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b179690856f4b099d1cf8e0c45b6038, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732141433258 2024-11-20T22:23:55,888 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bcb23bb7d9544120aaa78472f981e1c8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141433282 2024-11-20T22:23:55,889 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 553c8d84d2bc4c30adea3458906d6cfd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141433735 2024-11-20T22:23:55,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/afbc0f64a32c4df69bdb7dc50aea3c58 is 50, key is test_row_0/C:col10/1732141434498/Put/seqid=0 2024-11-20T22:23:55,900 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into 7806b20ac1714a228c6e427d3009517f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
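The SortedCompactionPolicy/ExploringCompactionPolicy lines above show the region server picking three store files on its own for a minor compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C. For comparison only, a compaction can also be requested from a client through the Admin API; the sketch below mirrors the table and family names from this test, while the connection setup is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask for a (minor) compaction of just the C family, like the selection above.
          admin.compact(table, Bytes.toBytes("C"));
          // Or request that all store files of the table be rewritten in a major compaction.
          admin.majorCompact(table);
        }
      }
    }

Both calls only request a compaction; the region server still schedules, selects files for, and throttles the actual work, as the log above shows.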
2024-11-20T22:23:55,900 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:55,900 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=13, startTime=1732141435592; duration=0sec 2024-11-20T22:23:55,900 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:55,900 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:23:55,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741873_1049 (size=9757) 2024-11-20T22:23:55,935 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#35 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:55,935 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/afbc0f64a32c4df69bdb7dc50aea3c58 2024-11-20T22:23:55,936 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/cf3545c62aa54927a544f74c8b8d848c is 50, key is test_row_0/C:col10/1732141433735/Put/seqid=0 2024-11-20T22:23:55,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd8a797cc6234d2a911a16d4d013751d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd8a797cc6234d2a911a16d4d013751d 2024-11-20T22:23:55,964 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd8a797cc6234d2a911a16d4d013751d, entries=100, sequenceid=171, filesize=9.5 K 2024-11-20T22:23:55,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/6fc75abbab1f4b45b772226d0b9f6f8c as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/6fc75abbab1f4b45b772226d0b9f6f8c 2024-11-20T22:23:55,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141495959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141495959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,981 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/6fc75abbab1f4b45b772226d0b9f6f8c, entries=100, sequenceid=171, filesize=9.5 K 2024-11-20T22:23:55,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141495963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141495972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:55,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141495978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:55,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/afbc0f64a32c4df69bdb7dc50aea3c58 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/afbc0f64a32c4df69bdb7dc50aea3c58 2024-11-20T22:23:56,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741874_1050 (size=12493) 2024-11-20T22:23:56,025 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/afbc0f64a32c4df69bdb7dc50aea3c58, entries=100, sequenceid=171, filesize=9.5 K 2024-11-20T22:23:56,027 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 175bc25ef8aacc6207ddcddcc7da4d90 in 431ms, sequenceid=171, compaction requested=false 2024-11-20T22:23:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
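The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server rejecting writers while the flush at sequenceid=171 drains the memstore. The standard HBase client already retries this exception internally, so the following is only a sketch of what an explicit retry with backoff could look like for a caller handling it itself; the row, column, value and backoff numbers are made up for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException busy) {
              // Region is above its memstore blocking limit; wait for the flush to catch up.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

Retrying after a pause is consistent with the log above, where the same client connections reappear a few milliseconds later with new callIds.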
2024-11-20T22:23:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T22:23:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T22:23:56,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T22:23:56,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 597 msec 2024-11-20T22:23:56,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 614 msec 2024-11-20T22:23:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T22:23:56,043 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T22:23:56,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:23:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T22:23:56,053 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:23:56,055 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:23:56,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:23:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:56,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:56,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T22:23:56,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:56,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:56,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
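The master-side entries above (Client=jenkins//172.17.0.2 flush TestAcidGuarantees, the stored FlushTableProcedure pid=20, and procId 18 reported completed through HBaseAdmin$TableFuture) are the server half of a client-initiated flush. A minimal client-side counterpart would look roughly like the sketch below; the connection setup is assumed rather than taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master and waits for it to finish,
          // which is what produces the "Operation: FLUSH ... completed" line above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }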
2024-11-20T22:23:56,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:56,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:56,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/0ed185652132461db82066992f283e47 is 50, key is test_row_0/A:col10/1732141435940/Put/seqid=0 2024-11-20T22:23:56,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141496116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141496122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141496117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741875_1051 (size=14541) 2024-11-20T22:23:56,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/0ed185652132461db82066992f283e47 2024-11-20T22:23:56,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:56,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/7988020eaeb5446eb9885f6118ece42f is 50, key is test_row_0/B:col10/1732141435940/Put/seqid=0 2024-11-20T22:23:56,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:56,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:56,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
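The 512.0 K figure in these exceptions is the region's blocking memstore size, i.e. the configured flush size multiplied by the block multiplier; the test presumably runs with a very small flush size so that flushes, compactions and write pushback all happen within seconds. The sketch below shows how the two standard settings combine; the key names are the usual HBase ones, but treat the defaults in the comments as approximate:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Size at which a memstore is flushed (default on the order of 128 MB).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writers are blocked once the memstore exceeds flushSize * multiplier (default 4).
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
      }
    }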
2024-11-20T22:23:56,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141496228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741876_1052 (size=12151) 2024-11-20T22:23:56,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141496252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141496253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:56,384 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:56,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:56,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,417 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/cf3545c62aa54927a544f74c8b8d848c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cf3545c62aa54927a544f74c8b8d848c 2024-11-20T22:23:56,442 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into cf3545c62aa54927a544f74c8b8d848c(size=12.2 K), total size for store is 21.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:56,442 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:56,443 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=13, startTime=1732141435593; duration=0sec 2024-11-20T22:23:56,444 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:56,444 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:23:56,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141496456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141496477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141496479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,539 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:56,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:56,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:56,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:56,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
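pid=21 above is a FlushRegionCallable dispatched to the region server by a master-side procedure (pid=20, which the master keeps polling with "Checking to see if procedure is done", appears to be its parent flush procedure; that pairing is an inference from the log). Each dispatch finds the region already mid-flush ("NOT flushing ... as already flushing"), fails with "Unable to complete flush", and the master simply re-dispatches until the MemStoreFlusher thread finishes its own flush. Such a table flush is normally requested through the Admin API, roughly as sketched below; the table name is taken from the log, and the snippet is a plain illustration of Admin.flush, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush request; the master runs a procedure that dispatches
      // FlushRegionCallable to the region servers hosting the table's regions.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}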
2024-11-20T22:23:56,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/7988020eaeb5446eb9885f6118ece42f 2024-11-20T22:23:56,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:56,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/dce45913be7f47b6af04697dac8a57b8 is 50, key is test_row_0/C:col10/1732141435940/Put/seqid=0 2024-11-20T22:23:56,696 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741877_1053 (size=12151) 2024-11-20T22:23:56,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141496772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141496784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141496789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,851 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:56,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:56,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:56,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:56,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:56,853 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:56,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:56,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
as already flushing 2024-11-20T22:23:57,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/dce45913be7f47b6af04697dac8a57b8 2024-11-20T22:23:57,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/0ed185652132461db82066992f283e47 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/0ed185652132461db82066992f283e47 2024-11-20T22:23:57,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/0ed185652132461db82066992f283e47, entries=200, sequenceid=202, filesize=14.2 K 2024-11-20T22:23:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:57,165 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/7988020eaeb5446eb9885f6118ece42f as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7988020eaeb5446eb9885f6118ece42f 2024-11-20T22:23:57,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:57,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
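The "Committing ... .tmp/<family>/... as .../<family>/..." and "Added ... entries=..., sequenceid=202, filesize=..." entries show the two-step flush commit: each column family's cells are first written to an HFile under the region's .tmp directory and only afterwards moved into the family directory (A/, B/, C/), so readers never observe a half-written store file. A minimal sketch of that write-then-rename pattern on a Hadoop FileSystem follows; the paths and payload are placeholders, not the files from this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder paths standing in for .tmp/B/<hfile> and B/<hfile> in the log.
    Path tmpFile = new Path("/tmp/flush-demo/.tmp/B/hfile-0001");
    Path committed = new Path("/tmp/flush-demo/B/hfile-0001");
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeUTF("flushed cells would be written here");
    }
    fs.mkdirs(committed.getParent());
    // The "Committing ... as ..." step: rename the finished file into the store directory.
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("commit failed for " + tmpFile);
    }
  }
}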
2024-11-20T22:23:57,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7988020eaeb5446eb9885f6118ece42f, entries=150, sequenceid=202, filesize=11.9 K 2024-11-20T22:23:57,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/dce45913be7f47b6af04697dac8a57b8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dce45913be7f47b6af04697dac8a57b8 2024-11-20T22:23:57,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dce45913be7f47b6af04697dac8a57b8, entries=150, sequenceid=202, filesize=11.9 K 2024-11-20T22:23:57,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1109ms, sequenceid=202, compaction requested=true 2024-11-20T22:23:57,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:57,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:23:57,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:57,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:23:57,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:23:57,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:23:57,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:23:57,210 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:57,210 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:57,213 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36791 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:57,213 DEBUG 
[RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:23:57,213 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,213 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7806b20ac1714a228c6e427d3009517f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd8a797cc6234d2a911a16d4d013751d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/0ed185652132461db82066992f283e47] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=35.9 K 2024-11-20T22:23:57,215 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:57,215 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:23:57,215 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
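"Exploring compaction algorithm has selected 3 files of size 36791 ... with 1 in ratio" reflects ExploringCompactionPolicy accepting a candidate set only when its files are "in ratio", i.e. no single file is larger than the combined size of the other candidates times hbase.hstore.compaction.ratio (1.2 by default, if memory serves). A simplified stand-alone version of that check is sketched below; it is not HBase's actual implementation, and the byte sizes only roughly match the three A-family files listed above (12.2 K, 9.5 K, 14.2 K).

import java.util.List;

public class RatioCheckSketch {
  // Simplified "in ratio" test over a candidate set of store-file sizes.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // One oversized file disqualifies the whole candidate set.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<Long> sizes = List.of(12_492L, 9_752L, 14_547L); // approx. the three A-family files
    System.out.println(filesInRatio(sizes, 1.2));          // true: the set is compactable
  }
}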
2024-11-20T22:23:57,215 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cf3545c62aa54927a544f74c8b8d848c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/afbc0f64a32c4df69bdb7dc50aea3c58, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dce45913be7f47b6af04697dac8a57b8] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=33.6 K 2024-11-20T22:23:57,216 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7806b20ac1714a228c6e427d3009517f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141433735 2024-11-20T22:23:57,217 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting cf3545c62aa54927a544f74c8b8d848c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141433735 2024-11-20T22:23:57,218 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting afbc0f64a32c4df69bdb7dc50aea3c58, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141434498 2024-11-20T22:23:57,219 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd8a797cc6234d2a911a16d4d013751d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141434498 2024-11-20T22:23:57,220 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting dce45913be7f47b6af04697dac8a57b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732141435940 2024-11-20T22:23:57,221 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ed185652132461db82066992f283e47, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732141435940 2024-11-20T22:23:57,284 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:57,285 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/a2f1cef32c224c9f94d58ea9c7a074d5 is 50, key is test_row_0/C:col10/1732141435940/Put/seqid=0 2024-11-20T22:23:57,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:23:57,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:57,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:57,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:57,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:57,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:57,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:57,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:57,315 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:57,315 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d445901f96eb46e2b4a9ef0a4b393a89 is 50, key is test_row_0/A:col10/1732141435940/Put/seqid=0 2024-11-20T22:23:57,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:57,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
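The PressureAwareThroughputController lines show compaction writes being rate-limited: with no flush pressure the "total limit" sits at 50.00 MB/second and scales toward a higher bound as pressure rises. The sketch below merely sets the two bound keys as I recall them (hbase.hstore.compaction.throughput.lower.bound / ...higher.bound); treat both key names and the 100 MB/s upper value as assumptions to verify against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys for the pressure-aware compaction throughput controller;
    // 50 MB/s matches the "total limit" reported above when pressure is ~0.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
  }
}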
2024-11-20T22:23:57,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/a0cb1a20716947feb2a9001624061a9b is 50, key is test_row_0/A:col10/1732141437305/Put/seqid=0 2024-11-20T22:23:57,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141497392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141497405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141497408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741878_1054 (size=12595) 2024-11-20T22:23:57,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741879_1055 (size=12595) 2024-11-20T22:23:57,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741880_1056 (size=16931) 2024-11-20T22:23:57,482 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:57,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141497516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141497539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141497547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:57,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,639 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141497728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141497753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141497768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:23:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:57,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:57,839 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/a2f1cef32c224c9f94d58ea9c7a074d5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/a2f1cef32c224c9f94d58ea9c7a074d5 2024-11-20T22:23:57,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/a0cb1a20716947feb2a9001624061a9b 2024-11-20T22:23:57,862 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d445901f96eb46e2b4a9ef0a4b393a89 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d445901f96eb46e2b4a9ef0a4b393a89 2024-11-20T22:23:57,876 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into a2f1cef32c224c9f94d58ea9c7a074d5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:23:57,876 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:57,876 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=13, startTime=1732141437210; duration=0sec 2024-11-20T22:23:57,876 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:23:57,876 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:23:57,876 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:23:57,877 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into d445901f96eb46e2b4a9ef0a4b393a89(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:23:57,877 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:57,877 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=13, startTime=1732141437209; duration=0sec 2024-11-20T22:23:57,879 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:57,879 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:23:57,879 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:23:57,879 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:23:57,879 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,881 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/916c11e51a6b4c7f89392c3245f8b312, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/6fc75abbab1f4b45b772226d0b9f6f8c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7988020eaeb5446eb9885f6118ece42f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=33.6 K 2024-11-20T22:23:57,881 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 916c11e51a6b4c7f89392c3245f8b312, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141433735 2024-11-20T22:23:57,882 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fc75abbab1f4b45b772226d0b9f6f8c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141434498 2024-11-20T22:23:57,883 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7988020eaeb5446eb9885f6118ece42f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732141435940 2024-11-20T22:23:57,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/fefd7512f3ac44b29052db4b2c980128 is 50, key is 
test_row_0/B:col10/1732141437305/Put/seqid=0 2024-11-20T22:23:57,917 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#43 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:23:57,918 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/088b3324c34b422fb0de33d695bf4f5a is 50, key is test_row_0/B:col10/1732141435940/Put/seqid=0 2024-11-20T22:23:57,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:57,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741881_1057 (size=12151) 2024-11-20T22:23:57,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:57,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:57,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:57,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:23:57,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/fefd7512f3ac44b29052db4b2c980128 2024-11-20T22:23:57,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741882_1058 (size=12595) 2024-11-20T22:23:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:57,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141497994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,007 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4268 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:23:58,009 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/088b3324c34b422fb0de33d695bf4f5a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/088b3324c34b422fb0de33d695bf4f5a 2024-11-20T22:23:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141497983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,015 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4273 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at 
java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:23:58,038 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 088b3324c34b422fb0de33d695bf4f5a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
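The "Call exception, tries=6, retries=16, started=4273 ms ago" entry above records the HBase client retrying a put against the busy region through RpcRetryingCallerImpl before giving up. As a rough illustration only, not taken from this test run: a writer issuing the same kind of put as AcidGuaranteesTestTool's AtomicityWriter could look like the sketch below. The table, row, family and qualifier names mirror the log; the retry settings are assumed values.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration; the run above shows retries=16.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setInt("hbase.client.pause", 100); // base pause in ms; backoff grows per attempt
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // RegionTooBusyException (as logged above) is retried inside the client;
            // it only surfaces here as an IOException once the retries are exhausted.
            table.put(put);
          } catch (IOException e) {
            System.err.println("put failed after client retries: " + e.getMessage());
          }
        }
      }
    }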
2024-11-20T22:23:58,038 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:58,039 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=13, startTime=1732141437209; duration=0sec 2024-11-20T22:23:58,039 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:23:58,039 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:23:58,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141498040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/2ca3adacd1a5421197bf25ec09a5c5a7 is 50, key is test_row_0/C:col10/1732141437305/Put/seqid=0 2024-11-20T22:23:58,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141498062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741883_1059 (size=12151) 2024-11-20T22:23:58,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/2ca3adacd1a5421197bf25ec09a5c5a7 2024-11-20T22:23:58,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/a0cb1a20716947feb2a9001624061a9b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a0cb1a20716947feb2a9001624061a9b 2024-11-20T22:23:58,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141498081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a0cb1a20716947feb2a9001624061a9b, entries=250, sequenceid=215, filesize=16.5 K 2024-11-20T22:23:58,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/fefd7512f3ac44b29052db4b2c980128 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/fefd7512f3ac44b29052db4b2c980128 2024-11-20T22:23:58,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/fefd7512f3ac44b29052db4b2c980128, entries=150, sequenceid=215, filesize=11.9 K 2024-11-20T22:23:58,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/2ca3adacd1a5421197bf25ec09a5c5a7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2ca3adacd1a5421197bf25ec09a5c5a7 2024-11-20T22:23:58,129 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:58,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
as already flushing 2024-11-20T22:23:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:58,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:23:58,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2ca3adacd1a5421197bf25ec09a5c5a7, entries=150, sequenceid=215, filesize=11.9 K 2024-11-20T22:23:58,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 175bc25ef8aacc6207ddcddcc7da4d90 in 833ms, sequenceid=215, compaction requested=false 2024-11-20T22:23:58,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:58,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:23:58,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T22:23:58,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
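The "Over memstore limit=512.0 K" rejections and the "Unable to complete flush ... as already flushing" procedure failure above both trace back to HRegion.checkResources blocking writes once the region's memstore passes its blocking threshold, which HBase derives from the per-region flush size and a block multiplier. The snippet below is a hedged sketch, not the test's actual configuration: the property keys are the standard HBase ones, but the concrete values are assumptions chosen only to reproduce a 512 KB blocking limit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: 128 KB flush size x multiplier 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" messages; the test's real settings are not in this excerpt.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes to a region are rejected (RegionTooBusyException) once its"
            + " memstore exceeds " + blockingLimit + " bytes, until a flush catches up");
      }
    }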
2024-11-20T22:23:58,287 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:23:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:58,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/96a052a782e4417a94a0d194ffb3fcc5 is 50, key is test_row_0/A:col10/1732141437405/Put/seqid=0 2024-11-20T22:23:58,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741884_1060 (size=12151) 2024-11-20T22:23:58,348 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/96a052a782e4417a94a0d194ffb3fcc5 2024-11-20T22:23:58,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/f3bd23b3f8ff418c97daa972cffe8ea6 is 50, key is test_row_0/B:col10/1732141437405/Put/seqid=0 2024-11-20T22:23:58,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741885_1061 (size=12151) 2024-11-20T22:23:58,420 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/f3bd23b3f8ff418c97daa972cffe8ea6 2024-11-20T22:23:58,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ee5282deb1174a03994c721dea91a110 is 50, key is test_row_0/C:col10/1732141437405/Put/seqid=0 2024-11-20T22:23:58,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741886_1062 (size=12151) 2024-11-20T22:23:58,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:58,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:23:58,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141498602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141498616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141498617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141498718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141498723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141498726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,910 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ee5282deb1174a03994c721dea91a110 2024-11-20T22:23:58,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/96a052a782e4417a94a0d194ffb3fcc5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/96a052a782e4417a94a0d194ffb3fcc5 2024-11-20T22:23:58,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141498937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,952 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/96a052a782e4417a94a0d194ffb3fcc5, entries=150, sequenceid=242, filesize=11.9 K 2024-11-20T22:23:58,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141498938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:58,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141498939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:58,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/f3bd23b3f8ff418c97daa972cffe8ea6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f3bd23b3f8ff418c97daa972cffe8ea6 2024-11-20T22:23:58,971 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f3bd23b3f8ff418c97daa972cffe8ea6, entries=150, sequenceid=242, filesize=11.9 K 2024-11-20T22:23:58,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ee5282deb1174a03994c721dea91a110 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ee5282deb1174a03994c721dea91a110 2024-11-20T22:23:58,991 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ee5282deb1174a03994c721dea91a110, 
entries=150, sequenceid=242, filesize=11.9 K 2024-11-20T22:23:58,992 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 175bc25ef8aacc6207ddcddcc7da4d90 in 705ms, sequenceid=242, compaction requested=true 2024-11-20T22:23:58,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:23:58,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:23:58,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T22:23:58,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T22:23:58,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T22:23:58,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9400 sec 2024-11-20T22:23:59,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.9500 sec 2024-11-20T22:23:59,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:23:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:23:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:23:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:23:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:23:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:23:59,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/4e7a7f175bfe4275bb8a6f0bde35e053 is 50, key is test_row_0/A:col10/1732141439258/Put/seqid=0 2024-11-20T22:23:59,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741887_1063 (size=12151) 2024-11-20T22:23:59,377 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141499364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141499362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141499377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141499480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141499480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141499496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141499699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141499701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:23:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141499716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:23:59,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/4e7a7f175bfe4275bb8a6f0bde35e053 2024-11-20T22:23:59,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/7be0f84c23c0497a9b15848ff086bf1a is 50, key is test_row_0/B:col10/1732141439258/Put/seqid=0 2024-11-20T22:23:59,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741888_1064 (size=12151) 2024-11-20T22:23:59,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/7be0f84c23c0497a9b15848ff086bf1a 2024-11-20T22:23:59,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ac25df4daf3d48efb2986a802b38f18e is 50, key is test_row_0/C:col10/1732141439258/Put/seqid=0 2024-11-20T22:23:59,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741889_1065 (size=12151) 2024-11-20T22:24:00,020 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141500017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141500030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141500038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T22:24:00,169 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T22:24:00,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T22:24:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:24:00,173 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:00,174 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:00,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:24:00,327 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:24:00,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
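The run of WARN/DEBUG pairs above shows the region server refusing client Mutate calls with RegionTooBusyException while the memstore sits over its 512 K blocking limit, and handing that exception back to the callers at 172.17.0.2. Purely as a hedged illustration (the stock HBase client already retries these internally and may surface them wrapped in a RetriesExhaustedException), a minimal caller-side retry against the same table, row key and family seen in the log could look like the sketch below; connection settings, the written value and the retry limits are assumptions, not taken from TestAcidGuarantees.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier as the flush output above; the value is illustrative.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // start small, grow on each refusal
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // server may refuse with RegionTooBusyException
          break;                            // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for the in-flight flush to drain.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}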
2024-11-20T22:24:00,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:00,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:00,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:00,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
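The record above shows the procedure-driven flush for pid=23 being refused because the MemStoreFlusher is already flushing this region, so the memstore stays over its blocking limit until that flush drains. As a sketch of where such a limit normally comes from: with stock configuration keys the per-region blocking size is roughly the flush size times the block multiplier, and the tiny 512 K figure in these warnings presumably reflects test-specific overrides. The numbers below are illustrative defaults, not the values used by TestAcidGuarantees.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 MB (the usual default).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Block new writes once the memstore grows past 4x the flush size.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes blocked above roughly " + blockingSize + " bytes per region");
  }
}

Raising either value trades memory for fewer RegionTooBusyException refusals; a test that keeps the limit this small presumably does so to exercise exactly this back-pressure path.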
2024-11-20T22:24:00,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
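Here the master records the failed remote procedure for pid=23 and will redispatch it once the region is no longer mid-flush. The flush itself was requested through the Admin API (the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" call earlier in the log); a minimal sketch of that client-side request follows, with connection details assumed rather than taken from the test harness.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master turns this into the FlushTableProcedure/FlushRegionProcedure pair
      // seen above; if a region is already flushing, the remote callable can fail
      // (as with pid=23) and the master retries it.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}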
2024-11-20T22:24:00,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ac25df4daf3d48efb2986a802b38f18e 2024-11-20T22:24:00,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/4e7a7f175bfe4275bb8a6f0bde35e053 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4e7a7f175bfe4275bb8a6f0bde35e053 2024-11-20T22:24:00,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4e7a7f175bfe4275bb8a6f0bde35e053, entries=150, sequenceid=256, filesize=11.9 K 2024-11-20T22:24:00,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/7be0f84c23c0497a9b15848ff086bf1a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7be0f84c23c0497a9b15848ff086bf1a 2024-11-20T22:24:00,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7be0f84c23c0497a9b15848ff086bf1a, entries=150, sequenceid=256, filesize=11.9 K 2024-11-20T22:24:00,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ac25df4daf3d48efb2986a802b38f18e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ac25df4daf3d48efb2986a802b38f18e 2024-11-20T22:24:00,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ac25df4daf3d48efb2986a802b38f18e, entries=150, sequenceid=256, filesize=11.9 K 2024-11-20T22:24:00,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1154ms, sequenceid=256, compaction requested=true 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:00,418 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:00,418 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:00,420 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:00,420 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:24:00,421 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:00,421 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/088b3324c34b422fb0de33d695bf4f5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/fefd7512f3ac44b29052db4b2c980128, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f3bd23b3f8ff418c97daa972cffe8ea6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7be0f84c23c0497a9b15848ff086bf1a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=47.9 K 2024-11-20T22:24:00,423 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:00,423 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:24:00,423 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:00,424 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d445901f96eb46e2b4a9ef0a4b393a89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a0cb1a20716947feb2a9001624061a9b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/96a052a782e4417a94a0d194ffb3fcc5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4e7a7f175bfe4275bb8a6f0bde35e053] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=52.6 K 2024-11-20T22:24:00,424 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 088b3324c34b422fb0de33d695bf4f5a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732141435940 2024-11-20T22:24:00,425 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d445901f96eb46e2b4a9ef0a4b393a89, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732141435940 2024-11-20T22:24:00,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fefd7512f3ac44b29052db4b2c980128, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, 
earliestPutTs=1732141436120 2024-11-20T22:24:00,425 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0cb1a20716947feb2a9001624061a9b, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732141436120 2024-11-20T22:24:00,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f3bd23b3f8ff418c97daa972cffe8ea6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732141437385 2024-11-20T22:24:00,426 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96a052a782e4417a94a0d194ffb3fcc5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732141437385 2024-11-20T22:24:00,426 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7be0f84c23c0497a9b15848ff086bf1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732141438600 2024-11-20T22:24:00,426 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e7a7f175bfe4275bb8a6f0bde35e053, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732141438600 2024-11-20T22:24:00,454 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:00,455 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/77d5cf472ecb4da6b620e46af29fd2bd is 50, key is test_row_0/B:col10/1732141439258/Put/seqid=0 2024-11-20T22:24:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:24:00,486 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:00,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,487 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/391c9d7ebc19433da60c6404d47c54cb is 50, key is test_row_0/A:col10/1732141439258/Put/seqid=0 2024-11-20T22:24:00,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T22:24:00,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:00,487 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:00,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741890_1066 (size=12731) 2024-11-20T22:24:00,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
as already flushing 2024-11-20T22:24:00,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:00,539 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/77d5cf472ecb4da6b620e46af29fd2bd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/77d5cf472ecb4da6b620e46af29fd2bd 2024-11-20T22:24:00,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/289fc1fae7b84ab79fd8ccc70189c97c is 50, key is test_row_0/A:col10/1732141439359/Put/seqid=0 2024-11-20T22:24:00,561 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 77d5cf472ecb4da6b620e46af29fd2bd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:00,562 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:00,562 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=12, startTime=1732141440418; duration=0sec 2024-11-20T22:24:00,562 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:00,562 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:24:00,562 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:00,565 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:00,565 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:24:00,565 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
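With four HFiles now in each store, the ExploringCompactionPolicy above selects all of them for minor compactions of B and C. Purely as an illustration (the region server chose these compactions on its own), an operator could force and then monitor a compaction of the same family through the Admin API, as sketched below; the table and family names come from the log, everything else is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionStatusExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a major compaction of the B store (the family being compacted above).
      admin.majorCompact(table, Bytes.toBytes("B"));
      // Compactions run asynchronously; poll until the server reports none in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}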
2024-11-20T22:24:00,565 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/a2f1cef32c224c9f94d58ea9c7a074d5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2ca3adacd1a5421197bf25ec09a5c5a7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ee5282deb1174a03994c721dea91a110, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ac25df4daf3d48efb2986a802b38f18e] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=47.9 K 2024-11-20T22:24:00,566 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a2f1cef32c224c9f94d58ea9c7a074d5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732141435940 2024-11-20T22:24:00,567 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ca3adacd1a5421197bf25ec09a5c5a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732141436120 2024-11-20T22:24:00,568 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ee5282deb1174a03994c721dea91a110, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732141437385 2024-11-20T22:24:00,569 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ac25df4daf3d48efb2986a802b38f18e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732141438600 2024-11-20T22:24:00,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141500610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141500619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141500624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,629 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:00,630 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/c8a1a85f41f34d3193679a2dedece2e8 is 50, key is test_row_0/C:col10/1732141439258/Put/seqid=0 2024-11-20T22:24:00,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741892_1068 (size=12301) 2024-11-20T22:24:00,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741891_1067 (size=12731) 2024-11-20T22:24:00,668 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/289fc1fae7b84ab79fd8ccc70189c97c 2024-11-20T22:24:00,677 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/391c9d7ebc19433da60c6404d47c54cb as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/391c9d7ebc19433da60c6404d47c54cb 2024-11-20T22:24:00,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/d9fbad44e3e24f6799895bf626e531da is 50, key is test_row_0/B:col10/1732141439359/Put/seqid=0 2024-11-20T22:24:00,691 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into 391c9d7ebc19433da60c6404d47c54cb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:00,691 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:00,691 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=12, startTime=1732141440418; duration=0sec 2024-11-20T22:24:00,691 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:00,691 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:24:00,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741893_1069 (size=12731) 2024-11-20T22:24:00,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141500722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141500729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141500729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741894_1070 (size=12301) 2024-11-20T22:24:00,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:24:00,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141500927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141500939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:00,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141500939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,143 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/c8a1a85f41f34d3193679a2dedece2e8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/c8a1a85f41f34d3193679a2dedece2e8 2024-11-20T22:24:01,148 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/d9fbad44e3e24f6799895bf626e531da 2024-11-20T22:24:01,174 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into c8a1a85f41f34d3193679a2dedece2e8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:01,174 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:01,174 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=12, startTime=1732141440418; duration=0sec 2024-11-20T22:24:01,175 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:01,175 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:24:01,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d74b4b5eac664e158a8a3c9b0aa46522 is 50, key is test_row_0/C:col10/1732141439359/Put/seqid=0 2024-11-20T22:24:01,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741895_1071 (size=12301) 2024-11-20T22:24:01,185 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d74b4b5eac664e158a8a3c9b0aa46522 2024-11-20T22:24:01,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/289fc1fae7b84ab79fd8ccc70189c97c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/289fc1fae7b84ab79fd8ccc70189c97c 2024-11-20T22:24:01,205 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/289fc1fae7b84ab79fd8ccc70189c97c, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T22:24:01,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/d9fbad44e3e24f6799895bf626e531da as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d9fbad44e3e24f6799895bf626e531da 2024-11-20T22:24:01,229 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d9fbad44e3e24f6799895bf626e531da, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T22:24:01,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/d74b4b5eac664e158a8a3c9b0aa46522 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d74b4b5eac664e158a8a3c9b0aa46522 2024-11-20T22:24:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141501233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141501245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141501249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,259 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d74b4b5eac664e158a8a3c9b0aa46522, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T22:24:01,261 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 175bc25ef8aacc6207ddcddcc7da4d90 in 773ms, sequenceid=278, compaction requested=false 2024-11-20T22:24:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:01,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T22:24:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T22:24:01,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T22:24:01,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0900 sec 2024-11-20T22:24:01,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.0960 sec 2024-11-20T22:24:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T22:24:01,287 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T22:24:01,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:01,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T22:24:01,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:24:01,292 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:01,295 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:01,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:01,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:24:01,449 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T22:24:01,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:01,450 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:01,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:01,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:01,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:01,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/436bf98ede6d4642910060bd3bc97bf1 is 50, key is test_row_0/A:col10/1732141440617/Put/seqid=0 2024-11-20T22:24:01,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741896_1072 (size=12301) 2024-11-20T22:24:01,536 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/436bf98ede6d4642910060bd3bc97bf1 2024-11-20T22:24:01,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/20f25d079f974995bd3b869a9d505b4c is 50, key is test_row_0/B:col10/1732141440617/Put/seqid=0 2024-11-20T22:24:01,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:24:01,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741897_1073 (size=12301) 2024-11-20T22:24:01,599 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=296 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/20f25d079f974995bd3b869a9d505b4c 2024-11-20T22:24:01,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/0bd68e5589c14d9d9bd8d827c08931d2 is 50, key is test_row_0/C:col10/1732141440617/Put/seqid=0 2024-11-20T22:24:01,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741898_1074 (size=12301) 2024-11-20T22:24:01,647 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/0bd68e5589c14d9d9bd8d827c08931d2 2024-11-20T22:24:01,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/436bf98ede6d4642910060bd3bc97bf1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/436bf98ede6d4642910060bd3bc97bf1 2024-11-20T22:24:01,686 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/436bf98ede6d4642910060bd3bc97bf1, entries=150, sequenceid=296, filesize=12.0 K 2024-11-20T22:24:01,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/20f25d079f974995bd3b869a9d505b4c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/20f25d079f974995bd3b869a9d505b4c 2024-11-20T22:24:01,697 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/20f25d079f974995bd3b869a9d505b4c, entries=150, sequenceid=296, filesize=12.0 K 2024-11-20T22:24:01,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/0bd68e5589c14d9d9bd8d827c08931d2 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/0bd68e5589c14d9d9bd8d827c08931d2 2024-11-20T22:24:01,709 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/0bd68e5589c14d9d9bd8d827c08931d2, entries=150, sequenceid=296, filesize=12.0 K 2024-11-20T22:24:01,712 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 175bc25ef8aacc6207ddcddcc7da4d90 in 262ms, sequenceid=296, compaction requested=true 2024-11-20T22:24:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T22:24:01,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T22:24:01,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T22:24:01,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 418 msec 2024-11-20T22:24:01,721 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 429 msec 2024-11-20T22:24:01,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:01,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:01,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:01,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:01,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:01,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:01,821 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/5efc976288a9461183bbe101a40ad9cf is 50, key is test_row_1/A:col10/1732141441778/Put/seqid=0 2024-11-20T22:24:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741899_1075 (size=14737) 2024-11-20T22:24:01,871 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/5efc976288a9461183bbe101a40ad9cf 2024-11-20T22:24:01,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141501891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T22:24:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141501893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,897 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T22:24:01,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141501893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:01,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T22:24:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:24:01,901 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:01,902 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:01,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:01,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/f01b9bb7573d4630aabaabc5c7d53f4b is 50, key is test_row_1/B:col10/1732141441778/Put/seqid=0 2024-11-20T22:24:01,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741900_1076 (size=9857) 2024-11-20T22:24:02,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141501997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:24:02,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141502000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141501999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40218 deadline: 1732141502027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,029 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8290 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:02,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40230 deadline: 1732141502027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,031 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8293 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:02,055 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T22:24:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
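The handlers above repeatedly reject Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") while the client-side RpcRetryingCallerImpl keeps retrying (tries=7, retries=16). As a point of reference, here is a minimal client-side sketch of the write path that produces this retry loop — an illustration only, assuming a reachable test cluster and the standard HBase 2.x client API. The table, row, family and qualifier names are copied from the log; the class name and the retry/pause values are assumptions, not the test's actual settings.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyWriteSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs behind the "tries=7, retries=16" loop seen above
        // (illustrative values, not read from this log).
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100); // base pause in ms between retries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Each attempt can be rejected server-side with RegionTooBusyException while the
          // region's memstore is over its blocking limit; the caller keeps retrying until
          // the retry budget or the operation deadline is exhausted.
          table.put(put);
        }
      }
    }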
2024-11-20T22:24:02,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:24:02,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141502203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,210 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141502209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T22:24:02,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:02,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
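The "Over memstore limit=512.0 K" figure thrown from HRegion.checkResources is the region's blocking memstore size, which HBase derives from the configured flush size multiplied by the block multiplier; a 512 K limit suggests the test runs with a deliberately tiny flush size so writers hit the limit quickly. A hedged sketch of the two server-side properties involved follows — the concrete values are assumptions chosen so the arithmetic matches the log, not values read from it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BlockingMemstoreSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The region requests a flush once its memstore reaches this size
        // (128 K here is an assumed, test-sized value)...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ...and blocks new writes with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier, i.e. 128 K * 4 = 512 K as reported above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288 = 512.0 K
      }
    }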
2024-11-20T22:24:02,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141502213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/f01b9bb7573d4630aabaabc5c7d53f4b 2024-11-20T22:24:02,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T22:24:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
as already flushing 2024-11-20T22:24:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:02,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/9fa68a3d88504e409c8447548f0b9759 is 50, key is test_row_1/C:col10/1732141441778/Put/seqid=0 2024-11-20T22:24:02,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741901_1077 (size=9857) 2024-11-20T22:24:02,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/9fa68a3d88504e409c8447548f0b9759 2024-11-20T22:24:02,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/5efc976288a9461183bbe101a40ad9cf as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5efc976288a9461183bbe101a40ad9cf 2024-11-20T22:24:02,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5efc976288a9461183bbe101a40ad9cf, entries=200, sequenceid=307, filesize=14.4 K 2024-11-20T22:24:02,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/f01b9bb7573d4630aabaabc5c7d53f4b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f01b9bb7573d4630aabaabc5c7d53f4b 2024-11-20T22:24:02,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f01b9bb7573d4630aabaabc5c7d53f4b, entries=100, sequenceid=307, filesize=9.6 K 2024-11-20T22:24:02,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/9fa68a3d88504e409c8447548f0b9759 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/9fa68a3d88504e409c8447548f0b9759 2024-11-20T22:24:02,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/9fa68a3d88504e409c8447548f0b9759, entries=100, sequenceid=307, filesize=9.6 K 2024-11-20T22:24:02,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 175bc25ef8aacc6207ddcddcc7da4d90 in 712ms, sequenceid=307, compaction requested=true 2024-11-20T22:24:02,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:02,497 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:02,498 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:02,500 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52070 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:02,500 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:24:02,500 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,500 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/391c9d7ebc19433da60c6404d47c54cb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/289fc1fae7b84ab79fd8ccc70189c97c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/436bf98ede6d4642910060bd3bc97bf1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5efc976288a9461183bbe101a40ad9cf] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=50.8 K 2024-11-20T22:24:02,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:02,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:24:02,501 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:02,501 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/77d5cf472ecb4da6b620e46af29fd2bd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d9fbad44e3e24f6799895bf626e531da, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/20f25d079f974995bd3b869a9d505b4c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f01b9bb7573d4630aabaabc5c7d53f4b] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=46.1 K 2024-11-20T22:24:02,501 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 391c9d7ebc19433da60c6404d47c54cb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732141438600 2024-11-20T22:24:02,504 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d5cf472ecb4da6b620e46af29fd2bd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732141438600 2024-11-20T22:24:02,504 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 289fc1fae7b84ab79fd8ccc70189c97c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732141439328 2024-11-20T22:24:02,506 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d9fbad44e3e24f6799895bf626e531da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732141439328 2024-11-20T22:24:02,506 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 436bf98ede6d4642910060bd3bc97bf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732141440605 2024-11-20T22:24:02,507 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 20f25d079f974995bd3b869a9d505b4c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732141440605 2024-11-20T22:24:02,507 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5efc976288a9461183bbe101a40ad9cf, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732141441770 2024-11-20T22:24:02,507 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f01b9bb7573d4630aabaabc5c7d53f4b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732141441778 2024-11-20T22:24:02,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:24:02,522 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,522 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T22:24:02,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,523 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:24:02,530 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#63 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:02,531 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/014a4c53cd9a49cb9e74993abbecf4db is 50, key is test_row_0/A:col10/1732141440617/Put/seqid=0 2024-11-20T22:24:02,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:02,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:02,548 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#64 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:02,549 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/86f045c92f7c49d5850e1804a0fad08b is 50, key is test_row_0/B:col10/1732141440617/Put/seqid=0 2024-11-20T22:24:02,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd6980fb515149a2a466abe1a1714da3 is 50, key is test_row_0/A:col10/1732141441891/Put/seqid=0 2024-11-20T22:24:02,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141502565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141502563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141502567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741902_1078 (size=13017) 2024-11-20T22:24:02,608 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/014a4c53cd9a49cb9e74993abbecf4db as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/014a4c53cd9a49cb9e74993abbecf4db 2024-11-20T22:24:02,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741903_1079 (size=17181) 2024-11-20T22:24:02,636 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into 014a4c53cd9a49cb9e74993abbecf4db(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
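Annotation (not part of the original log): the repeated RegionTooBusyException "Over memstore limit=512.0 K" warnings above mean the region's memstore has grown past its blocking threshold, so incoming mutations are rejected until the in-flight flush (pid=27) drains it. In HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests this test lowers the flush size far below the 128 MB default, which is an assumption, since the log does not show the test's configuration. A minimal sketch of how the two settings combine:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the 512 K blocking limit seen in the log is flush size
// times block multiplier. The concrete values below (128 K * 4) are an
// assumption chosen to reproduce that number, not read from the test setup.
public final class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush a memstore at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x that
    long blockAt = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("puts fail with RegionTooBusyException above ~" + blockAt + " bytes");
  }
}
```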
2024-11-20T22:24:02,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:02,636 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=12, startTime=1732141442497; duration=0sec 2024-11-20T22:24:02,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:02,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:24:02,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:02,639 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:02,639 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:24:02,639 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:02,639 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/c8a1a85f41f34d3193679a2dedece2e8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d74b4b5eac664e158a8a3c9b0aa46522, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/0bd68e5589c14d9d9bd8d827c08931d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/9fa68a3d88504e409c8447548f0b9759] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=46.1 K 2024-11-20T22:24:02,640 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8a1a85f41f34d3193679a2dedece2e8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732141438600 2024-11-20T22:24:02,641 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d74b4b5eac664e158a8a3c9b0aa46522, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732141439328 2024-11-20T22:24:02,641 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bd68e5589c14d9d9bd8d827c08931d2, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732141440605 2024-11-20T22:24:02,642 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fa68a3d88504e409c8447548f0b9759, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732141441778 2024-11-20T22:24:02,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741904_1080 (size=13017) 2024-11-20T22:24:02,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141502674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,678 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#66 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:02,679 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/cbd8ad62c85e4ae398838f8cef35a9b8 is 50, key is test_row_0/C:col10/1732141440617/Put/seqid=0 2024-11-20T22:24:02,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141502675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141502676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741905_1081 (size=13017) 2024-11-20T22:24:02,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141502879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141502890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:02,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141502897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:24:03,023 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd6980fb515149a2a466abe1a1714da3 2024-11-20T22:24:03,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/4665a3493393454c94acc04462ed590b is 50, key is test_row_0/B:col10/1732141441891/Put/seqid=0 2024-11-20T22:24:03,058 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/86f045c92f7c49d5850e1804a0fad08b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/86f045c92f7c49d5850e1804a0fad08b 2024-11-20T22:24:03,075 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 86f045c92f7c49d5850e1804a0fad08b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
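Annotation (not part of the original log): each rejected mutation in the CallRunner lines above is bounced back to a client at 172.17.0.2, which is expected to back off and retry once the flush frees memstore space. Below is a minimal client-side sketch of that pattern; it is not the writer used by TestAcidGuarantees, and the row, family, and qualifier names simply mirror the log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer that backs off when the region reports it is too busy
// (memstore over its blocking limit), then retries. Depending on client retry
// settings the server's RegionTooBusyException may arrive wrapped, so the
// cause chain is inspected rather than the top-level exception type.
public final class BusyRegionWriter {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          return;                                  // write accepted
        } catch (IOException ioe) {
          if (!causedByBusyRegion(ioe)) {
            throw ioe;                             // some other failure
          }
          Thread.sleep(backoffMs);                 // let the flush drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
      throw new IOException("region stayed busy after 10 attempts");
    }
  }

  private static boolean causedByBusyRegion(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }
}
```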
2024-11-20T22:24:03,075 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:03,075 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=12, startTime=1732141442498; duration=0sec 2024-11-20T22:24:03,075 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:03,075 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:24:03,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741906_1082 (size=12301) 2024-11-20T22:24:03,086 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/4665a3493393454c94acc04462ed590b 2024-11-20T22:24:03,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/57e8c870d81448adb42222675b02ac40 is 50, key is test_row_0/C:col10/1732141441891/Put/seqid=0 2024-11-20T22:24:03,156 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/cbd8ad62c85e4ae398838f8cef35a9b8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cbd8ad62c85e4ae398838f8cef35a9b8 2024-11-20T22:24:03,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741907_1083 (size=12301) 2024-11-20T22:24:03,174 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into cbd8ad62c85e4ae398838f8cef35a9b8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
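Annotation (not part of the original log): at this point all three stores (A, B, and C) of region 175bc25ef8aacc6207ddcddcc7da4d90 have been compacted from four files down to a single ~12.7 K file each, with the selections made automatically by ExploringCompactionPolicy. For completeness, a compaction can also be requested explicitly through the Admin API; the sketch below is a generic example under that assumption, not something this test does.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Generic sketch: queue a minor compaction of one column family and poll
// until the servers report no compaction in progress for the table.
public final class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table, Bytes.toBytes("C"));            // queue a minor compaction of store C
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);                                 // poll until the compaction queue drains
      }
    }
  }
}
```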
2024-11-20T22:24:03,174 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:03,174 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=12, startTime=1732141442498; duration=0sec 2024-11-20T22:24:03,175 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:03,175 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:24:03,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141503184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141503204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141503209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,564 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/57e8c870d81448adb42222675b02ac40 2024-11-20T22:24:03,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd6980fb515149a2a466abe1a1714da3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd6980fb515149a2a466abe1a1714da3 2024-11-20T22:24:03,582 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd6980fb515149a2a466abe1a1714da3, entries=250, sequenceid=334, filesize=16.8 K 2024-11-20T22:24:03,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/4665a3493393454c94acc04462ed590b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/4665a3493393454c94acc04462ed590b 2024-11-20T22:24:03,593 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/4665a3493393454c94acc04462ed590b, entries=150, sequenceid=334, filesize=12.0 K 2024-11-20T22:24:03,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/57e8c870d81448adb42222675b02ac40 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57e8c870d81448adb42222675b02ac40 2024-11-20T22:24:03,608 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57e8c870d81448adb42222675b02ac40, entries=150, sequenceid=334, filesize=12.0 K 2024-11-20T22:24:03,609 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1086ms, sequenceid=334, compaction requested=false 2024-11-20T22:24:03,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:03,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:03,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T22:24:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T22:24:03,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T22:24:03,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7100 sec 2024-11-20T22:24:03,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.7150 sec 2024-11-20T22:24:03,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:03,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:03,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:03,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:03,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:03,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:03,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
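Annotation (not part of the original log): the HBaseAdmin$TableFuture line above is the client side of this sequence. Thread-159 called an admin flush on TestAcidGuarantees, the master ran FlushTableProcedure pid=26 with FlushRegionProcedure pid=27 as its subprocedure, and the call returned once both finished. A minimal sketch of that client call, with only the table name taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client side of the flush seen above: Admin#flush submits a
// flush of every region of the table and returns once the master reports the
// corresponding procedure as done (as logged for procId 26).
public final class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // blocks until the flush completes
    }
  }
}
```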
2024-11-20T22:24:03,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:03,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/e9fa152a583d4a1698a5ad7f8bfa3b90 is 50, key is test_row_0/A:col10/1732141443699/Put/seqid=0 2024-11-20T22:24:03,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741908_1084 (size=12301) 2024-11-20T22:24:03,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/e9fa152a583d4a1698a5ad7f8bfa3b90 2024-11-20T22:24:03,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/b9ec9dfb5ac24d7298b8abdb00339a1d is 50, key is test_row_0/B:col10/1732141443699/Put/seqid=0 2024-11-20T22:24:03,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141503800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141503803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141503804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741909_1085 (size=12301) 2024-11-20T22:24:03,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141503907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141503910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:03,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141503912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T22:24:04,024 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T22:24:04,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:04,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T22:24:04,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:24:04,042 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:04,043 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:04,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:04,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141504114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141504117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141504119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:24:04,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T22:24:04,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:04,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/b9ec9dfb5ac24d7298b8abdb00339a1d 2024-11-20T22:24:04,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/e67bd58daf08449c891257e15d50766f is 50, key is test_row_0/C:col10/1732141443699/Put/seqid=0 2024-11-20T22:24:04,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:24:04,375 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741910_1086 (size=12301) 2024-11-20T22:24:04,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T22:24:04,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:04,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141504430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141504443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141504441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T22:24:04,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:04,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:24:04,705 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T22:24:04,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:04,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:04,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:04,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/e67bd58daf08449c891257e15d50766f 2024-11-20T22:24:04,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/e9fa152a583d4a1698a5ad7f8bfa3b90 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e9fa152a583d4a1698a5ad7f8bfa3b90 2024-11-20T22:24:04,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e9fa152a583d4a1698a5ad7f8bfa3b90, entries=150, sequenceid=348, filesize=12.0 K 2024-11-20T22:24:04,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/b9ec9dfb5ac24d7298b8abdb00339a1d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b9ec9dfb5ac24d7298b8abdb00339a1d 2024-11-20T22:24:04,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b9ec9dfb5ac24d7298b8abdb00339a1d, entries=150, sequenceid=348, filesize=12.0 K 2024-11-20T22:24:04,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/e67bd58daf08449c891257e15d50766f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/e67bd58daf08449c891257e15d50766f 2024-11-20T22:24:04,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/e67bd58daf08449c891257e15d50766f, entries=150, sequenceid=348, filesize=12.0 K 2024-11-20T22:24:04,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1124ms, sequenceid=348, compaction requested=true 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:04,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:24:04,828 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:04,828 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:04,829 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:04,829 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:24:04,829 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:04,830 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/014a4c53cd9a49cb9e74993abbecf4db, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd6980fb515149a2a466abe1a1714da3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e9fa152a583d4a1698a5ad7f8bfa3b90] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=41.5 K 2024-11-20T22:24:04,831 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:04,831 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:24:04,831 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:04,832 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cbd8ad62c85e4ae398838f8cef35a9b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57e8c870d81448adb42222675b02ac40, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/e67bd58daf08449c891257e15d50766f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.7 K 2024-11-20T22:24:04,832 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 014a4c53cd9a49cb9e74993abbecf4db, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732141440617 2024-11-20T22:24:04,832 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting cbd8ad62c85e4ae398838f8cef35a9b8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732141440617 2024-11-20T22:24:04,832 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd6980fb515149a2a466abe1a1714da3, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141441888 2024-11-20T22:24:04,833 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 57e8c870d81448adb42222675b02ac40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141441891 2024-11-20T22:24:04,833 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting e9fa152a583d4a1698a5ad7f8bfa3b90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1732141442565 2024-11-20T22:24:04,834 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e67bd58daf08449c891257e15d50766f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1732141442565 2024-11-20T22:24:04,858 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:04,858 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d806a6173f8b4ffcbe8a8e8c3839ef03 is 50, key is test_row_0/A:col10/1732141443699/Put/seqid=0 2024-11-20T22:24:04,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T22:24:04,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:04,868 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:24:04,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:04,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:04,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:04,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:04,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:04,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:04,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd4a6ab690aa4f3cb1db497fcc8ab774 is 50, key is test_row_0/A:col10/1732141443760/Put/seqid=0 2024-11-20T22:24:04,905 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#74 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:04,906 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/1e178f98185a4f16955fed6cfedbf599 is 50, key is test_row_0/C:col10/1732141443699/Put/seqid=0 2024-11-20T22:24:04,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741911_1087 (size=13119) 2024-11-20T22:24:04,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
as already flushing 2024-11-20T22:24:04,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:04,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741912_1088 (size=12301) 2024-11-20T22:24:04,962 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd4a6ab690aa4f3cb1db497fcc8ab774 2024-11-20T22:24:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741913_1089 (size=13119) 2024-11-20T22:24:04,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141504974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141504983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:04,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141504985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:04,995 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/1e178f98185a4f16955fed6cfedbf599 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/1e178f98185a4f16955fed6cfedbf599 2024-11-20T22:24:05,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/476d6e75990040708706370dbe34288d is 50, key is test_row_0/B:col10/1732141443760/Put/seqid=0 2024-11-20T22:24:05,015 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into 1e178f98185a4f16955fed6cfedbf599(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:05,015 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:05,015 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=13, startTime=1732141444825; duration=0sec 2024-11-20T22:24:05,015 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:05,015 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:24:05,015 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:05,020 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:05,020 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:24:05,020 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:05,020 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/86f045c92f7c49d5850e1804a0fad08b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/4665a3493393454c94acc04462ed590b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b9ec9dfb5ac24d7298b8abdb00339a1d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.7 K 2024-11-20T22:24:05,023 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 86f045c92f7c49d5850e1804a0fad08b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732141440617 2024-11-20T22:24:05,024 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4665a3493393454c94acc04462ed590b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732141441891 2024-11-20T22:24:05,025 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b9ec9dfb5ac24d7298b8abdb00339a1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1732141442565 2024-11-20T22:24:05,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 
is added to blk_1073741914_1090 (size=12301) 2024-11-20T22:24:05,059 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:05,060 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/842fb1f44acb49969f43060358185528 is 50, key is test_row_0/B:col10/1732141443699/Put/seqid=0 2024-11-20T22:24:05,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141505090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741915_1091 (size=13119) 2024-11-20T22:24:05,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141505094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141505094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:24:05,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141505297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141505311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141505312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,340 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d806a6173f8b4ffcbe8a8e8c3839ef03 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d806a6173f8b4ffcbe8a8e8c3839ef03 2024-11-20T22:24:05,359 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into d806a6173f8b4ffcbe8a8e8c3839ef03(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:05,359 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:05,359 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=13, startTime=1732141444825; duration=0sec 2024-11-20T22:24:05,359 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:05,359 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:24:05,439 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/476d6e75990040708706370dbe34288d 2024-11-20T22:24:05,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/57193da5ba7640b9b5b7aca44bc55cb4 is 50, key is test_row_0/C:col10/1732141443760/Put/seqid=0 2024-11-20T22:24:05,515 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/842fb1f44acb49969f43060358185528 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/842fb1f44acb49969f43060358185528 2024-11-20T22:24:05,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741916_1092 (size=12301) 2024-11-20T22:24:05,531 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 842fb1f44acb49969f43060358185528(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:05,531 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:05,531 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=13, startTime=1732141444825; duration=0sec 2024-11-20T22:24:05,531 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:05,531 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:24:05,532 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/57193da5ba7640b9b5b7aca44bc55cb4 2024-11-20T22:24:05,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/dd4a6ab690aa4f3cb1db497fcc8ab774 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd4a6ab690aa4f3cb1db497fcc8ab774 2024-11-20T22:24:05,569 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd4a6ab690aa4f3cb1db497fcc8ab774, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T22:24:05,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/476d6e75990040708706370dbe34288d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/476d6e75990040708706370dbe34288d 2024-11-20T22:24:05,595 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/476d6e75990040708706370dbe34288d, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T22:24:05,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/57193da5ba7640b9b5b7aca44bc55cb4 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57193da5ba7640b9b5b7aca44bc55cb4 2024-11-20T22:24:05,616 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57193da5ba7640b9b5b7aca44bc55cb4, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T22:24:05,618 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 175bc25ef8aacc6207ddcddcc7da4d90 in 750ms, sequenceid=374, compaction requested=false 2024-11-20T22:24:05,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:05,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:05,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T22:24:05,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T22:24:05,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:05,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:05,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:05,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:05,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:05,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:05,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:05,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:05,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T22:24:05,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5780 sec 2024-11-20T22:24:05,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.6060 sec 2024-11-20T22:24:05,643 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/955484c2949744eb8f5bbac119c1535d is 50, key is test_row_0/A:col10/1732141445629/Put/seqid=0 2024-11-20T22:24:05,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741917_1093 (size=12301) 2024-11-20T22:24:05,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141505716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141505718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141505723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141505830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141505832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:05,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141505836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141506036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141506040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141506052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/955484c2949744eb8f5bbac119c1535d 2024-11-20T22:24:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T22:24:06,150 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T22:24:06,153 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T22:24:06,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/117812cd2e664597b7430ca4b4baf543 is 50, key is test_row_0/B:col10/1732141445629/Put/seqid=0 2024-11-20T22:24:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:24:06,156 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:06,157 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-20T22:24:06,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:06,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741918_1094 (size=12301) 2024-11-20T22:24:06,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/117812cd2e664597b7430ca4b4baf543 2024-11-20T22:24:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:24:06,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/7dea808925ee4c1ebb1339f4e32c50a1 is 50, key is test_row_0/C:col10/1732141445629/Put/seqid=0 2024-11-20T22:24:06,309 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T22:24:06,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:06,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:06,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:06,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:06,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:06,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741919_1095 (size=12301) 2024-11-20T22:24:06,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/7dea808925ee4c1ebb1339f4e32c50a1 2024-11-20T22:24:06,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/955484c2949744eb8f5bbac119c1535d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/955484c2949744eb8f5bbac119c1535d 2024-11-20T22:24:06,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/955484c2949744eb8f5bbac119c1535d, entries=150, sequenceid=389, filesize=12.0 K 2024-11-20T22:24:06,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141506345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/117812cd2e664597b7430ca4b4baf543 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/117812cd2e664597b7430ca4b4baf543 2024-11-20T22:24:06,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141506353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/117812cd2e664597b7430ca4b4baf543, entries=150, sequenceid=389, filesize=12.0 K 2024-11-20T22:24:06,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141506362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/7dea808925ee4c1ebb1339f4e32c50a1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/7dea808925ee4c1ebb1339f4e32c50a1 2024-11-20T22:24:06,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/7dea808925ee4c1ebb1339f4e32c50a1, entries=150, sequenceid=389, filesize=12.0 K 2024-11-20T22:24:06,382 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 175bc25ef8aacc6207ddcddcc7da4d90 in 749ms, sequenceid=389, compaction requested=true 2024-11-20T22:24:06,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:06,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:06,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:06,383 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:06,383 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:06,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:06,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:06,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
175bc25ef8aacc6207ddcddcc7da4d90:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:06,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:06,385 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:06,385 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:24:06,385 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:06,385 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/842fb1f44acb49969f43060358185528, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/476d6e75990040708706370dbe34288d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/117812cd2e664597b7430ca4b4baf543] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.8 K 2024-11-20T22:24:06,386 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:06,386 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:24:06,386 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
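The repeated RegionTooBusyException warnings above are the region server rejecting Mutate calls because the region's memstore is over its blocking limit (512.0 K in this run) while the in-flight flush and the compactions that follow catch up. The sketch below shows, in simplified form, the kind of client write loop that produces and absorbs these rejections; it assumes a standard HBase 2.x client API, takes the table, row, and column names from the log, and the explicit retry/backoff policy is purely illustrative rather than what TestAcidGuarantees itself does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    // May be rejected server-side with RegionTooBusyException while the
                    // memstore is over its blocking limit, as in the log entries above.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 10) throw e;          // give up after a bounded number of attempts
                    Thread.sleep(backoffMs);             // back off and let the flush drain the memstore
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}

In practice the HBase client retries RegionTooBusyException internally and, if its retries are exhausted, may surface the failure wrapped in a RetriesExhaustedException; the explicit catch above is only there to make the server-side rejection visible.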
2024-11-20T22:24:06,386 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d806a6173f8b4ffcbe8a8e8c3839ef03, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd4a6ab690aa4f3cb1db497fcc8ab774, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/955484c2949744eb8f5bbac119c1535d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.8 K 2024-11-20T22:24:06,387 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 842fb1f44acb49969f43060358185528, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1732141442565 2024-11-20T22:24:06,388 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d806a6173f8b4ffcbe8a8e8c3839ef03, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1732141442565 2024-11-20T22:24:06,388 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 476d6e75990040708706370dbe34288d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141443760 2024-11-20T22:24:06,389 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd4a6ab690aa4f3cb1db497fcc8ab774, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141443760 2024-11-20T22:24:06,390 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 117812cd2e664597b7430ca4b4baf543, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732141444956 2024-11-20T22:24:06,399 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 955484c2949744eb8f5bbac119c1535d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732141444956 2024-11-20T22:24:06,418 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:06,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/5c397cd99c324145b452bfc07f9ccfc6 is 50, key is test_row_0/B:col10/1732141445629/Put/seqid=0 2024-11-20T22:24:06,427 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:06,428 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/77be7825741840aca1a67dd81f02ee88 is 50, key is test_row_0/A:col10/1732141445629/Put/seqid=0 2024-11-20T22:24:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:24:06,469 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T22:24:06,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:06,470 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:24:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741920_1096 (size=13221) 2024-11-20T22:24:06,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741921_1097 (size=13221) 2024-11-20T22:24:06,522 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/77be7825741840aca1a67dd81f02ee88 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/77be7825741840aca1a67dd81f02ee88 2024-11-20T22:24:06,534 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into 77be7825741840aca1a67dd81f02ee88(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:06,534 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:06,534 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=13, startTime=1732141446382; duration=0sec 2024-11-20T22:24:06,534 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:06,534 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:24:06,534 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:06,536 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:06,536 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:24:06,536 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
2024-11-20T22:24:06,536 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/1e178f98185a4f16955fed6cfedbf599, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57193da5ba7640b9b5b7aca44bc55cb4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/7dea808925ee4c1ebb1339f4e32c50a1] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.8 K 2024-11-20T22:24:06,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d4a00c40d1684008ae23c22965d5d059 is 50, key is test_row_0/A:col10/1732141445720/Put/seqid=0 2024-11-20T22:24:06,541 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e178f98185a4f16955fed6cfedbf599, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1732141442565 2024-11-20T22:24:06,543 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57193da5ba7640b9b5b7aca44bc55cb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732141443760 2024-11-20T22:24:06,546 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dea808925ee4c1ebb1339f4e32c50a1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732141444956 2024-11-20T22:24:06,584 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#84 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:06,585 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/aa803960144f47efbcd5be510dc3b7f5 is 50, key is test_row_0/C:col10/1732141445629/Put/seqid=0 2024-11-20T22:24:06,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741922_1098 (size=12301) 2024-11-20T22:24:06,625 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d4a00c40d1684008ae23c22965d5d059 2024-11-20T22:24:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741923_1099 (size=13221) 2024-11-20T22:24:06,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/96ab408fcf6043b3a8db5a2a01d59675 is 50, key is test_row_0/B:col10/1732141445720/Put/seqid=0 2024-11-20T22:24:06,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741924_1100 (size=12301) 2024-11-20T22:24:06,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:24:06,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. as already flushing 2024-11-20T22:24:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:06,906 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/5c397cd99c324145b452bfc07f9ccfc6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/5c397cd99c324145b452bfc07f9ccfc6 2024-11-20T22:24:06,934 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into 5c397cd99c324145b452bfc07f9ccfc6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
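The 512.0 K figure in the "Over memstore limit" messages is the per-region blocking threshold: writes are rejected once the memstore grows past the configured flush size times hbase.hregion.memstore.block.multiplier, and they are accepted again once a flush drains it. A hedged configuration sketch follows; the property names are the standard HBase ones, but the values are illustrative (chosen so they would yield the 512 K limit reported above) and are not read from this test run's actual site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore reaches flush.size * multiplier,
        // i.e. 4 * 128 KB = 512 KB here (the default multiplier is 4).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}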
2024-11-20T22:24:06,934 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:06,934 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=13, startTime=1732141446383; duration=0sec 2024-11-20T22:24:06,935 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:06,935 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:24:06,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141506924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141506946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:06,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:06,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141506947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141507055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141507059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141507060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,100 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/aa803960144f47efbcd5be510dc3b7f5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/aa803960144f47efbcd5be510dc3b7f5 2024-11-20T22:24:07,111 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into aa803960144f47efbcd5be510dc3b7f5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:07,111 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:07,111 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=13, startTime=1732141446384; duration=0sec 2024-11-20T22:24:07,112 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:07,112 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:24:07,138 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/96ab408fcf6043b3a8db5a2a01d59675 2024-11-20T22:24:07,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ba04fe4f6a0646dbb8a4e0720269790f is 50, key is test_row_0/C:col10/1732141445720/Put/seqid=0 2024-11-20T22:24:07,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741925_1101 (size=12301) 2024-11-20T22:24:07,213 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ba04fe4f6a0646dbb8a4e0720269790f 2024-11-20T22:24:07,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/d4a00c40d1684008ae23c22965d5d059 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d4a00c40d1684008ae23c22965d5d059 2024-11-20T22:24:07,251 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d4a00c40d1684008ae23c22965d5d059, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T22:24:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/96ab408fcf6043b3a8db5a2a01d59675 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/96ab408fcf6043b3a8db5a2a01d59675 2024-11-20T22:24:07,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:24:07,267 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/96ab408fcf6043b3a8db5a2a01d59675, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T22:24:07,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/ba04fe4f6a0646dbb8a4e0720269790f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ba04fe4f6a0646dbb8a4e0720269790f 2024-11-20T22:24:07,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141507261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,276 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ba04fe4f6a0646dbb8a4e0720269790f, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T22:24:07,277 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 175bc25ef8aacc6207ddcddcc7da4d90 in 807ms, sequenceid=413, compaction requested=false 2024-11-20T22:24:07,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:07,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
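The pid=30/pid=31 entries around here trace a table flush driven through the master: FlushTableProcedure (pid=30) spawns a FlushRegionProcedure (pid=31), the region server executes FlushRegionCallable and reports the result back, and a client keeps polling MasterRpcServices until the procedure is done. From the client side that whole sequence is normally started with a single Admin call; a minimal sketch, assuming the standard HBase 2.x Admin API, is:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table; on this code line the
            // master runs it as a procedure and the call returns when it completes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The call not returning until the flush finishes is consistent with the "Checking to see if procedure is done pid=30" polling above stopping once pid=30 is marked SUCCESS in the entries that follow.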
2024-11-20T22:24:07,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T22:24:07,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T22:24:07,282 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T22:24:07,282 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1220 sec 2024-11-20T22:24:07,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.1290 sec 2024-11-20T22:24:07,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:07,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:24:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:07,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/4c7e5f666bf54e73bb216da9bf28ad7a is 50, key is test_row_0/A:col10/1732141447291/Put/seqid=0 2024-11-20T22:24:07,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741926_1102 (size=12301) 2024-11-20T22:24:07,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141507416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141507416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141507540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141507540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40184 deadline: 1732141507575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40206 deadline: 1732141507747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:07,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40242 deadline: 1732141507747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:07,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/4c7e5f666bf54e73bb216da9bf28ad7a 2024-11-20T22:24:07,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/8632250e4519474e8ad9cec7ae290609 is 50, key is test_row_0/B:col10/1732141447291/Put/seqid=0 2024-11-20T22:24:07,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741927_1103 (size=12301) 2024-11-20T22:24:07,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/8632250e4519474e8ad9cec7ae290609 2024-11-20T22:24:07,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/f0529539b85d46a883905abebc1ae9c9 is 50, key is test_row_0/C:col10/1732141447291/Put/seqid=0 2024-11-20T22:24:07,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741928_1104 (size=12301) 2024-11-20T22:24:07,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/f0529539b85d46a883905abebc1ae9c9 2024-11-20T22:24:07,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/4c7e5f666bf54e73bb216da9bf28ad7a as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4c7e5f666bf54e73bb216da9bf28ad7a 2024-11-20T22:24:07,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4c7e5f666bf54e73bb216da9bf28ad7a, entries=150, sequenceid=431, filesize=12.0 K 2024-11-20T22:24:07,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/8632250e4519474e8ad9cec7ae290609 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8632250e4519474e8ad9cec7ae290609 2024-11-20T22:24:07,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8632250e4519474e8ad9cec7ae290609, entries=150, sequenceid=431, filesize=12.0 K 2024-11-20T22:24:07,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/f0529539b85d46a883905abebc1ae9c9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9 2024-11-20T22:24:07,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9, entries=150, sequenceid=431, filesize=12.0 K 2024-11-20T22:24:07,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 175bc25ef8aacc6207ddcddcc7da4d90 in 663ms, sequenceid=431, compaction requested=true 2024-11-20T22:24:07,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:07,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:07,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:07,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:07,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:07,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 175bc25ef8aacc6207ddcddcc7da4d90:C, 
priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:07,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:07,979 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:07,979 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:07,991 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:07,991 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/A is initiating minor compaction (all files) 2024-11-20T22:24:07,991 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/A in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:07,992 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/77be7825741840aca1a67dd81f02ee88, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d4a00c40d1684008ae23c22965d5d059, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4c7e5f666bf54e73bb216da9bf28ad7a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.9 K 2024-11-20T22:24:07,992 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:07,992 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/B is initiating minor compaction (all files) 2024-11-20T22:24:07,992 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/B in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 
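Note on the repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K"): the region server is refusing writes while the region's memstore sits above its blocking threshold, and the stock HBase client treats this as a retryable condition, backing off until a flush frees space. Below is a minimal standalone Java sketch of that pattern; the table name, column family, and backoff schedule are illustrative assumptions, not values taken from this run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                               // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {    // illustrative retry budget
                try {
                    table.put(put);                             // a blocked region rejects this write
                    break;
                } catch (IOException busy) {
                    // In the scenario logged above this is a RegionTooBusyException
                    // (possibly wrapped by the client's own retry machinery);
                    // back off and let MemStoreFlusher catch up before retrying.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                             // simple exponential backoff
                }
            }
        }
    }
}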
2024-11-20T22:24:07,992 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/5c397cd99c324145b452bfc07f9ccfc6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/96ab408fcf6043b3a8db5a2a01d59675, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8632250e4519474e8ad9cec7ae290609] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.9 K 2024-11-20T22:24:07,993 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 77be7825741840aca1a67dd81f02ee88, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732141444956 2024-11-20T22:24:07,994 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c397cd99c324145b452bfc07f9ccfc6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732141444956 2024-11-20T22:24:07,999 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d4a00c40d1684008ae23c22965d5d059, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732141445702 2024-11-20T22:24:07,999 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96ab408fcf6043b3a8db5a2a01d59675, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732141445702 2024-11-20T22:24:08,000 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c7e5f666bf54e73bb216da9bf28ad7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732141446914 2024-11-20T22:24:08,000 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8632250e4519474e8ad9cec7ae290609, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732141446914 2024-11-20T22:24:08,028 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#A#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:08,029 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/ba1d989ba47a4c3fabf2483e48bab32c is 50, key is test_row_0/A:col10/1732141447291/Put/seqid=0 2024-11-20T22:24:08,033 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#B#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:08,034 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e5781ee3069a44ff81d7de5ef24cb47c is 50, key is test_row_0/B:col10/1732141447291/Put/seqid=0 2024-11-20T22:24:08,056 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x33a38638 to 127.0.0.1:51916 2024-11-20T22:24:08,056 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,057 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72105e62 to 127.0.0.1:51916 2024-11-20T22:24:08,057 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:08,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:08,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:08,059 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73a92982 to 127.0.0.1:51916 2024-11-20T22:24:08,059 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:08,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:08,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:08,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:08,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:08,061 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b0bb6c to 127.0.0.1:51916 2024-11-20T22:24:08,061 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,067 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74feaf47 to 127.0.0.1:51916 2024-11-20T22:24:08,067 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,067 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x619ba81c to 127.0.0.1:51916 2024-11-20T22:24:08,067 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741929_1105 (size=13323) 2024-11-20T22:24:08,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741930_1106 (size=13323) 2024-11-20T22:24:08,088 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/ba1d989ba47a4c3fabf2483e48bab32c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/ba1d989ba47a4c3fabf2483e48bab32c 2024-11-20T22:24:08,090 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d1a674a to 127.0.0.1:51916 2024-11-20T22:24:08,090 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:08,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/e71a62e5339b4371b239d1a62be7ead0 is 50, key is test_row_0/A:col10/1732141447414/Put/seqid=0 2024-11-20T22:24:08,107 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/e5781ee3069a44ff81d7de5ef24cb47c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5781ee3069a44ff81d7de5ef24cb47c 2024-11-20T22:24:08,119 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/B of 175bc25ef8aacc6207ddcddcc7da4d90 into e5781ee3069a44ff81d7de5ef24cb47c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:08,119 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:08,119 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/B, priority=13, startTime=1732141447974; duration=0sec 2024-11-20T22:24:08,119 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:08,119 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:B 2024-11-20T22:24:08,119 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:08,120 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/A of 175bc25ef8aacc6207ddcddcc7da4d90 into ba1d989ba47a4c3fabf2483e48bab32c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
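Note on the "Exploring compaction algorithm has selected 3 files of size 37823 ... with 1 in ratio" lines above: HBase's size-based compaction selection accepts a candidate set when no file is larger than the configured ratio times the combined size of the other candidates. The sketch below shows only that core check, using sizes reconstructed from the log (12.9 K + 12.0 K + 12.0 K = 37823 bytes) and assuming the usual 1.2 default for hbase.hstore.compaction.ratio; the real ExploringCompactionPolicy additionally enumerates permutations and honours min/max file counts and off-peak ratios.

import java.util.List;

public class RatioCheckSketch {

    // A candidate set is "in ratio" if no file exceeds ratio * (sum of the other files).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three selected store files above (sums to 37823 bytes).
        List<Long> candidate = List.of(13_221L, 12_301L, 12_301L);
        double ratio = 1.2;  // assumed hbase.hstore.compaction.ratio default
        System.out.println("in ratio: " + filesInRatio(candidate, ratio));  // prints true
    }
}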
2024-11-20T22:24:08,120 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:08,120 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/A, priority=13, startTime=1732141447974; duration=0sec 2024-11-20T22:24:08,120 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:08,120 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:A 2024-11-20T22:24:08,121 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:08,121 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 175bc25ef8aacc6207ddcddcc7da4d90/C is initiating minor compaction (all files) 2024-11-20T22:24:08,121 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 175bc25ef8aacc6207ddcddcc7da4d90/C in TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:08,121 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/aa803960144f47efbcd5be510dc3b7f5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ba04fe4f6a0646dbb8a4e0720269790f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp, totalSize=36.9 K 2024-11-20T22:24:08,123 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa803960144f47efbcd5be510dc3b7f5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732141444956 2024-11-20T22:24:08,124 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba04fe4f6a0646dbb8a4e0720269790f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732141445702 2024-11-20T22:24:08,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741931_1107 (size=14741) 2024-11-20T22:24:08,127 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0529539b85d46a883905abebc1ae9c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732141446914 2024-11-20T22:24:08,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=453 
(bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/e71a62e5339b4371b239d1a62be7ead0 2024-11-20T22:24:08,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/57bf018e9704462d8534ed9256524e7d is 50, key is test_row_0/B:col10/1732141447414/Put/seqid=0 2024-11-20T22:24:08,149 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 175bc25ef8aacc6207ddcddcc7da4d90#C#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:08,150 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/472f0d5f768748c0a1fc622adde7b2e7 is 50, key is test_row_0/C:col10/1732141447291/Put/seqid=0 2024-11-20T22:24:08,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741932_1108 (size=12301) 2024-11-20T22:24:08,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/57bf018e9704462d8534ed9256524e7d 2024-11-20T22:24:08,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741933_1109 (size=13323) 2024-11-20T22:24:08,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/55354124847e4149bfd42a987516b913 is 50, key is test_row_0/C:col10/1732141447414/Put/seqid=0 2024-11-20T22:24:08,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741934_1110 (size=12301) 2024-11-20T22:24:08,238 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/472f0d5f768748c0a1fc622adde7b2e7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/472f0d5f768748c0a1fc622adde7b2e7 2024-11-20T22:24:08,251 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 175bc25ef8aacc6207ddcddcc7da4d90/C of 175bc25ef8aacc6207ddcddcc7da4d90 into 472f0d5f768748c0a1fc622adde7b2e7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
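Note on the 512.0 K blocking limit reported with the RegionTooBusyException entries: in HBase this threshold is the per-region flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier, defaults 128 MB and 4). A 128 KB flush size with the default multiplier reproduces exactly 512 KB, which is presumably how this test scales the region down; that 128 KB value is an inference, not taken from this log. The Java sketch below only illustrates the arithmetic on a Configuration object; in a real run these properties must be set on the region servers (for example in the mini-cluster configuration before start-up).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush trigger; 128 KB is an assumed test-scale value.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blockingLimit + " bytes");  // 524288 = 512 K
    }
}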
2024-11-20T22:24:08,251 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:08,251 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90., storeName=175bc25ef8aacc6207ddcddcc7da4d90/C, priority=13, startTime=1732141447975; duration=0sec 2024-11-20T22:24:08,262 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:08,262 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 175bc25ef8aacc6207ddcddcc7da4d90:C 2024-11-20T22:24:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T22:24:08,265 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T22:24:08,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/55354124847e4149bfd42a987516b913 2024-11-20T22:24:08,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/e71a62e5339b4371b239d1a62be7ead0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e71a62e5339b4371b239d1a62be7ead0 2024-11-20T22:24:08,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e71a62e5339b4371b239d1a62be7ead0, entries=200, sequenceid=453, filesize=14.4 K 2024-11-20T22:24:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/57bf018e9704462d8534ed9256524e7d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/57bf018e9704462d8534ed9256524e7d 2024-11-20T22:24:08,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/57bf018e9704462d8534ed9256524e7d, entries=150, sequenceid=453, filesize=12.0 K 2024-11-20T22:24:08,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/55354124847e4149bfd42a987516b913 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/55354124847e4149bfd42a987516b913 2024-11-20T22:24:08,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/55354124847e4149bfd42a987516b913, entries=150, sequenceid=453, filesize=12.0 K 2024-11-20T22:24:08,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=13.42 KB/13740 for 175bc25ef8aacc6207ddcddcc7da4d90 in 700ms, sequenceid=453, compaction requested=false 2024-11-20T22:24:08,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 175bc25ef8aacc6207ddcddcc7da4d90: 2024-11-20T22:24:09,159 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:24:12,093 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4adb5511 to 127.0.0.1:51916 2024-11-20T22:24:12,093 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:12,114 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x16defd30 to 127.0.0.1:51916 2024-11-20T22:24:12,114 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 101 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 107 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2211 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2231 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1066 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3195 rows 2024-11-20T22:24:12,114 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1051 2024-11-20T22:24:12,115 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3152 rows 2024-11-20T22:24:12,115 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:24:12,115 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78f1301b to 127.0.0.1:51916 2024-11-20T22:24:12,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:12,117 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:24:12,122 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:24:12,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:12,132 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141452131"}]},"ts":"1732141452131"} 2024-11-20T22:24:12,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:24:12,134 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:24:12,160 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:24:12,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:24:12,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, UNASSIGN}] 2024-11-20T22:24:12,169 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, UNASSIGN 2024-11-20T22:24:12,169 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=175bc25ef8aacc6207ddcddcc7da4d90, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:12,171 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:24:12,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:24:12,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:24:12,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:12,329 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:12,329 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 175bc25ef8aacc6207ddcddcc7da4d90, disabling compactions & flushes 2024-11-20T22:24:12,330 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. after waiting 0 ms 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90. 2024-11-20T22:24:12,330 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing 175bc25ef8aacc6207ddcddcc7da4d90 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=A 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=B 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 175bc25ef8aacc6207ddcddcc7da4d90, store=C 2024-11-20T22:24:12,330 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:12,334 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/8d3f74ac04d147ae82fddc00f99f94ab is 50, key is test_row_0/A:col10/1732141452113/Put/seqid=0 2024-11-20T22:24:12,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741935_1111 (size=12301) 2024-11-20T22:24:12,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:24:12,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:24:12,739 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=463 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/8d3f74ac04d147ae82fddc00f99f94ab 2024-11-20T22:24:12,760 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/53da575dc9334e02b11cc2c4d060bfe5 is 50, key is test_row_0/B:col10/1732141452113/Put/seqid=0 2024-11-20T22:24:12,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741936_1112 (size=12301) 2024-11-20T22:24:13,185 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/53da575dc9334e02b11cc2c4d060bfe5 2024-11-20T22:24:13,196 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/dc131230963c4619b23e5bdf92c21886 is 50, key is test_row_0/C:col10/1732141452113/Put/seqid=0 2024-11-20T22:24:13,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741937_1113 (size=12301) 2024-11-20T22:24:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T22:24:13,601 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/dc131230963c4619b23e5bdf92c21886 2024-11-20T22:24:13,612 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/A/8d3f74ac04d147ae82fddc00f99f94ab as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8d3f74ac04d147ae82fddc00f99f94ab 2024-11-20T22:24:13,620 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8d3f74ac04d147ae82fddc00f99f94ab, entries=150, sequenceid=463, filesize=12.0 K 2024-11-20T22:24:13,626 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/B/53da575dc9334e02b11cc2c4d060bfe5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/53da575dc9334e02b11cc2c4d060bfe5 2024-11-20T22:24:13,637 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/53da575dc9334e02b11cc2c4d060bfe5, entries=150, sequenceid=463, filesize=12.0 K 2024-11-20T22:24:13,638 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/.tmp/C/dc131230963c4619b23e5bdf92c21886 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dc131230963c4619b23e5bdf92c21886 2024-11-20T22:24:13,650 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dc131230963c4619b23e5bdf92c21886, entries=150, sequenceid=463, filesize=12.0 K 2024-11-20T22:24:13,651 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 175bc25ef8aacc6207ddcddcc7da4d90 in 1321ms, sequenceid=463, compaction requested=true 2024-11-20T22:24:13,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7f13db094ee9405496c31b5eb6a85f11, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/da765452131a476088017c633a9e39bd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8933cf7a71c3427296ba71bb453e034a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/85e1e46af61048c2b171b39c038a9cb8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5237bffaf1ff482b8b847b9f25df87f2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/b46dae7e3f5845349b544c986ffe4d19, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a941ca588f414160a4172b76b1893c8a, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/59c35ef445fc41b784f28f5817e41e8b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/f04230df08fd42928c6b4787be909d97, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/fcd02affc1c84f1285f82c99e85928a5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7806b20ac1714a228c6e427d3009517f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd8a797cc6234d2a911a16d4d013751d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/0ed185652132461db82066992f283e47, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d445901f96eb46e2b4a9ef0a4b393a89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a0cb1a20716947feb2a9001624061a9b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/96a052a782e4417a94a0d194ffb3fcc5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/391c9d7ebc19433da60c6404d47c54cb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4e7a7f175bfe4275bb8a6f0bde35e053, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/289fc1fae7b84ab79fd8ccc70189c97c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/436bf98ede6d4642910060bd3bc97bf1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5efc976288a9461183bbe101a40ad9cf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/014a4c53cd9a49cb9e74993abbecf4db, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd6980fb515149a2a466abe1a1714da3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d806a6173f8b4ffcbe8a8e8c3839ef03, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e9fa152a583d4a1698a5ad7f8bfa3b90, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd4a6ab690aa4f3cb1db497fcc8ab774, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/77be7825741840aca1a67dd81f02ee88, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/955484c2949744eb8f5bbac119c1535d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d4a00c40d1684008ae23c22965d5d059, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4c7e5f666bf54e73bb216da9bf28ad7a] to archive 2024-11-20T22:24:13,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:24:13,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7f13db094ee9405496c31b5eb6a85f11 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7f13db094ee9405496c31b5eb6a85f11 2024-11-20T22:24:13,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/da765452131a476088017c633a9e39bd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/da765452131a476088017c633a9e39bd 2024-11-20T22:24:13,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8933cf7a71c3427296ba71bb453e034a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8933cf7a71c3427296ba71bb453e034a 2024-11-20T22:24:13,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/85e1e46af61048c2b171b39c038a9cb8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/85e1e46af61048c2b171b39c038a9cb8 2024-11-20T22:24:13,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5237bffaf1ff482b8b847b9f25df87f2 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5237bffaf1ff482b8b847b9f25df87f2 2024-11-20T22:24:13,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/b46dae7e3f5845349b544c986ffe4d19 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/b46dae7e3f5845349b544c986ffe4d19 2024-11-20T22:24:13,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a941ca588f414160a4172b76b1893c8a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a941ca588f414160a4172b76b1893c8a 2024-11-20T22:24:13,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/59c35ef445fc41b784f28f5817e41e8b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/59c35ef445fc41b784f28f5817e41e8b 2024-11-20T22:24:13,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/f04230df08fd42928c6b4787be909d97 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/f04230df08fd42928c6b4787be909d97 2024-11-20T22:24:13,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/fcd02affc1c84f1285f82c99e85928a5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/fcd02affc1c84f1285f82c99e85928a5 2024-11-20T22:24:13,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7806b20ac1714a228c6e427d3009517f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/7806b20ac1714a228c6e427d3009517f 2024-11-20T22:24:13,729 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd8a797cc6234d2a911a16d4d013751d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd8a797cc6234d2a911a16d4d013751d 2024-11-20T22:24:13,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/0ed185652132461db82066992f283e47 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/0ed185652132461db82066992f283e47 2024-11-20T22:24:13,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d445901f96eb46e2b4a9ef0a4b393a89 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d445901f96eb46e2b4a9ef0a4b393a89 2024-11-20T22:24:13,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a0cb1a20716947feb2a9001624061a9b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/a0cb1a20716947feb2a9001624061a9b 2024-11-20T22:24:13,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/96a052a782e4417a94a0d194ffb3fcc5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/96a052a782e4417a94a0d194ffb3fcc5 2024-11-20T22:24:13,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/391c9d7ebc19433da60c6404d47c54cb to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/391c9d7ebc19433da60c6404d47c54cb 2024-11-20T22:24:13,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4e7a7f175bfe4275bb8a6f0bde35e053 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4e7a7f175bfe4275bb8a6f0bde35e053 2024-11-20T22:24:13,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/289fc1fae7b84ab79fd8ccc70189c97c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/289fc1fae7b84ab79fd8ccc70189c97c 2024-11-20T22:24:13,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/436bf98ede6d4642910060bd3bc97bf1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/436bf98ede6d4642910060bd3bc97bf1 2024-11-20T22:24:13,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5efc976288a9461183bbe101a40ad9cf to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/5efc976288a9461183bbe101a40ad9cf 2024-11-20T22:24:13,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/014a4c53cd9a49cb9e74993abbecf4db to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/014a4c53cd9a49cb9e74993abbecf4db 2024-11-20T22:24:13,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd6980fb515149a2a466abe1a1714da3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd6980fb515149a2a466abe1a1714da3 2024-11-20T22:24:13,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d806a6173f8b4ffcbe8a8e8c3839ef03 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d806a6173f8b4ffcbe8a8e8c3839ef03 2024-11-20T22:24:13,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e9fa152a583d4a1698a5ad7f8bfa3b90 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e9fa152a583d4a1698a5ad7f8bfa3b90 2024-11-20T22:24:13,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd4a6ab690aa4f3cb1db497fcc8ab774 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/dd4a6ab690aa4f3cb1db497fcc8ab774 2024-11-20T22:24:13,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/77be7825741840aca1a67dd81f02ee88 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/77be7825741840aca1a67dd81f02ee88 2024-11-20T22:24:13,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/955484c2949744eb8f5bbac119c1535d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/955484c2949744eb8f5bbac119c1535d 2024-11-20T22:24:13,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d4a00c40d1684008ae23c22965d5d059 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/d4a00c40d1684008ae23c22965d5d059 2024-11-20T22:24:13,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4c7e5f666bf54e73bb216da9bf28ad7a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/4c7e5f666bf54e73bb216da9bf28ad7a 2024-11-20T22:24:13,828 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e6b7f2d7780c4cddbcf6d765f4e00a2c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8f2e65538875446db7923b937efb8cfc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/c68d325e692b4284906b0423abaf28e6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/bd6682dbbb514251bd51a5b7d48c4686, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5cdd33944f84645ade7b4ea48cc33ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b562521754094d0b9f59f71451d031ff, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/ae3b86f957a34f82b6d900df4c89aafc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/3f228cb1012b4db18b80ced8265d6c2b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/916c11e51a6b4c7f89392c3245f8b312, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/09c63b1b608e4e85998d990e2944ccbc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/6fc75abbab1f4b45b772226d0b9f6f8c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/088b3324c34b422fb0de33d695bf4f5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7988020eaeb5446eb9885f6118ece42f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/fefd7512f3ac44b29052db4b2c980128, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f3bd23b3f8ff418c97daa972cffe8ea6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/77d5cf472ecb4da6b620e46af29fd2bd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7be0f84c23c0497a9b15848ff086bf1a, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d9fbad44e3e24f6799895bf626e531da, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/20f25d079f974995bd3b869a9d505b4c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/86f045c92f7c49d5850e1804a0fad08b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f01b9bb7573d4630aabaabc5c7d53f4b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/4665a3493393454c94acc04462ed590b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/842fb1f44acb49969f43060358185528, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b9ec9dfb5ac24d7298b8abdb00339a1d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/476d6e75990040708706370dbe34288d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/5c397cd99c324145b452bfc07f9ccfc6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/117812cd2e664597b7430ca4b4baf543, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/96ab408fcf6043b3a8db5a2a01d59675, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8632250e4519474e8ad9cec7ae290609] to archive 2024-11-20T22:24:13,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
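The StoreCloser entries above and below repeat the same pattern for every compacted HFile: the file is moved from the region's data directory to the identically named location under the archive root, i.e. everything after the test-data root is kept and prefixed with archive/. The short Java sketch below illustrates that path mapping only; it is not the HBase backup.HFileArchiver implementation, and the class and helper names are invented for illustration.

// Sketch of the data -> archive path mirroring visible in the HFileArchiver log entries.
// Assumptions: the "default" namespace layout seen in this log; ArchivePathSketch/toArchivePath are made-up names.
public final class ArchivePathSketch {

    /** Maps a store file under ".../data/default/..." to the mirrored ".../archive/data/default/..." path. */
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("unexpected layout: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72"
            + "/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e";
        // Prints the same archive location that the corresponding backup.HFileArchiver(596) entry reports for this file.
        System.out.println(toArchivePath(src));
    }
}

Run against the B-family file named in the entries above, it prints the same hdfs://.../archive/data/default/TestAcidGuarantees/... destination the log records.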
2024-11-20T22:24:13,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d29e9839c8bf4c148e69c37fe429308e 2024-11-20T22:24:13,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e6b7f2d7780c4cddbcf6d765f4e00a2c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e6b7f2d7780c4cddbcf6d765f4e00a2c 2024-11-20T22:24:13,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8f2e65538875446db7923b937efb8cfc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8f2e65538875446db7923b937efb8cfc 2024-11-20T22:24:13,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/c68d325e692b4284906b0423abaf28e6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/c68d325e692b4284906b0423abaf28e6 2024-11-20T22:24:13,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/bd6682dbbb514251bd51a5b7d48c4686 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/bd6682dbbb514251bd51a5b7d48c4686 2024-11-20T22:24:13,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5cdd33944f84645ade7b4ea48cc33ad to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5cdd33944f84645ade7b4ea48cc33ad 2024-11-20T22:24:13,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b562521754094d0b9f59f71451d031ff to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b562521754094d0b9f59f71451d031ff 2024-11-20T22:24:13,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/ae3b86f957a34f82b6d900df4c89aafc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/ae3b86f957a34f82b6d900df4c89aafc 2024-11-20T22:24:13,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/3f228cb1012b4db18b80ced8265d6c2b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/3f228cb1012b4db18b80ced8265d6c2b 2024-11-20T22:24:13,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/916c11e51a6b4c7f89392c3245f8b312 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/916c11e51a6b4c7f89392c3245f8b312 2024-11-20T22:24:13,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/09c63b1b608e4e85998d990e2944ccbc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/09c63b1b608e4e85998d990e2944ccbc 2024-11-20T22:24:13,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/6fc75abbab1f4b45b772226d0b9f6f8c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/6fc75abbab1f4b45b772226d0b9f6f8c 2024-11-20T22:24:13,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/088b3324c34b422fb0de33d695bf4f5a to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/088b3324c34b422fb0de33d695bf4f5a 2024-11-20T22:24:13,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7988020eaeb5446eb9885f6118ece42f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7988020eaeb5446eb9885f6118ece42f 2024-11-20T22:24:13,876 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/fefd7512f3ac44b29052db4b2c980128 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/fefd7512f3ac44b29052db4b2c980128 2024-11-20T22:24:13,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f3bd23b3f8ff418c97daa972cffe8ea6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f3bd23b3f8ff418c97daa972cffe8ea6 2024-11-20T22:24:13,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/77d5cf472ecb4da6b620e46af29fd2bd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/77d5cf472ecb4da6b620e46af29fd2bd 2024-11-20T22:24:13,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7be0f84c23c0497a9b15848ff086bf1a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/7be0f84c23c0497a9b15848ff086bf1a 2024-11-20T22:24:13,890 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d9fbad44e3e24f6799895bf626e531da to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/d9fbad44e3e24f6799895bf626e531da 2024-11-20T22:24:13,893 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/20f25d079f974995bd3b869a9d505b4c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/20f25d079f974995bd3b869a9d505b4c 2024-11-20T22:24:13,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/86f045c92f7c49d5850e1804a0fad08b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/86f045c92f7c49d5850e1804a0fad08b 2024-11-20T22:24:13,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f01b9bb7573d4630aabaabc5c7d53f4b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/f01b9bb7573d4630aabaabc5c7d53f4b 2024-11-20T22:24:13,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/4665a3493393454c94acc04462ed590b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/4665a3493393454c94acc04462ed590b 2024-11-20T22:24:13,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/842fb1f44acb49969f43060358185528 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/842fb1f44acb49969f43060358185528 2024-11-20T22:24:13,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b9ec9dfb5ac24d7298b8abdb00339a1d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/b9ec9dfb5ac24d7298b8abdb00339a1d 2024-11-20T22:24:13,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/476d6e75990040708706370dbe34288d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/476d6e75990040708706370dbe34288d 2024-11-20T22:24:13,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/5c397cd99c324145b452bfc07f9ccfc6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/5c397cd99c324145b452bfc07f9ccfc6 2024-11-20T22:24:13,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/117812cd2e664597b7430ca4b4baf543 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/117812cd2e664597b7430ca4b4baf543 2024-11-20T22:24:13,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/96ab408fcf6043b3a8db5a2a01d59675 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/96ab408fcf6043b3a8db5a2a01d59675 2024-11-20T22:24:13,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8632250e4519474e8ad9cec7ae290609 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/8632250e4519474e8ad9cec7ae290609 2024-11-20T22:24:13,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bf620aa465de42d7839018a0e9565789, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/4a5c86b0096c4bf1ab03bada44a85384, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ecbe8d3a0c56418383b1640d87f8e644, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d0399cc895474e96b560125f08aa8c49, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/890a3fad22c64d4ca39c45fc26e30508, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d7e3dc9ba0614e9392757d68dbf12750, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2b179690856f4b099d1cf8e0c45b6038, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/de6b5fe3ca684568b099f905795d8da5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bcb23bb7d9544120aaa78472f981e1c8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cf3545c62aa54927a544f74c8b8d848c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/553c8d84d2bc4c30adea3458906d6cfd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/afbc0f64a32c4df69bdb7dc50aea3c58, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/a2f1cef32c224c9f94d58ea9c7a074d5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dce45913be7f47b6af04697dac8a57b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2ca3adacd1a5421197bf25ec09a5c5a7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ee5282deb1174a03994c721dea91a110, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/c8a1a85f41f34d3193679a2dedece2e8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ac25df4daf3d48efb2986a802b38f18e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d74b4b5eac664e158a8a3c9b0aa46522, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/0bd68e5589c14d9d9bd8d827c08931d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cbd8ad62c85e4ae398838f8cef35a9b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/9fa68a3d88504e409c8447548f0b9759, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57e8c870d81448adb42222675b02ac40, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/1e178f98185a4f16955fed6cfedbf599, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/e67bd58daf08449c891257e15d50766f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57193da5ba7640b9b5b7aca44bc55cb4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/aa803960144f47efbcd5be510dc3b7f5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/7dea808925ee4c1ebb1339f4e32c50a1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ba04fe4f6a0646dbb8a4e0720269790f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9] to archive 2024-11-20T22:24:13,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:24:13,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bf620aa465de42d7839018a0e9565789 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bf620aa465de42d7839018a0e9565789 2024-11-20T22:24:13,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/4a5c86b0096c4bf1ab03bada44a85384 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/4a5c86b0096c4bf1ab03bada44a85384 2024-11-20T22:24:13,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ecbe8d3a0c56418383b1640d87f8e644 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ecbe8d3a0c56418383b1640d87f8e644 2024-11-20T22:24:13,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d0399cc895474e96b560125f08aa8c49 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d0399cc895474e96b560125f08aa8c49 2024-11-20T22:24:13,949 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/890a3fad22c64d4ca39c45fc26e30508 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/890a3fad22c64d4ca39c45fc26e30508 2024-11-20T22:24:13,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d7e3dc9ba0614e9392757d68dbf12750 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d7e3dc9ba0614e9392757d68dbf12750 2024-11-20T22:24:13,958 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2b179690856f4b099d1cf8e0c45b6038 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2b179690856f4b099d1cf8e0c45b6038 2024-11-20T22:24:13,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/de6b5fe3ca684568b099f905795d8da5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/de6b5fe3ca684568b099f905795d8da5 2024-11-20T22:24:13,963 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bcb23bb7d9544120aaa78472f981e1c8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/bcb23bb7d9544120aaa78472f981e1c8 2024-11-20T22:24:13,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cf3545c62aa54927a544f74c8b8d848c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cf3545c62aa54927a544f74c8b8d848c 2024-11-20T22:24:13,969 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/553c8d84d2bc4c30adea3458906d6cfd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/553c8d84d2bc4c30adea3458906d6cfd 2024-11-20T22:24:13,973 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/afbc0f64a32c4df69bdb7dc50aea3c58 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/afbc0f64a32c4df69bdb7dc50aea3c58 2024-11-20T22:24:13,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/a2f1cef32c224c9f94d58ea9c7a074d5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/a2f1cef32c224c9f94d58ea9c7a074d5 2024-11-20T22:24:13,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dce45913be7f47b6af04697dac8a57b8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dce45913be7f47b6af04697dac8a57b8 2024-11-20T22:24:13,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2ca3adacd1a5421197bf25ec09a5c5a7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/2ca3adacd1a5421197bf25ec09a5c5a7 2024-11-20T22:24:13,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ee5282deb1174a03994c721dea91a110 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ee5282deb1174a03994c721dea91a110 2024-11-20T22:24:13,994 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/c8a1a85f41f34d3193679a2dedece2e8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/c8a1a85f41f34d3193679a2dedece2e8 2024-11-20T22:24:13,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ac25df4daf3d48efb2986a802b38f18e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ac25df4daf3d48efb2986a802b38f18e 2024-11-20T22:24:14,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d74b4b5eac664e158a8a3c9b0aa46522 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/d74b4b5eac664e158a8a3c9b0aa46522 2024-11-20T22:24:14,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/0bd68e5589c14d9d9bd8d827c08931d2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/0bd68e5589c14d9d9bd8d827c08931d2 2024-11-20T22:24:14,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cbd8ad62c85e4ae398838f8cef35a9b8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/cbd8ad62c85e4ae398838f8cef35a9b8 2024-11-20T22:24:14,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/9fa68a3d88504e409c8447548f0b9759 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/9fa68a3d88504e409c8447548f0b9759 2024-11-20T22:24:14,015 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57e8c870d81448adb42222675b02ac40 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57e8c870d81448adb42222675b02ac40 2024-11-20T22:24:14,027 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/1e178f98185a4f16955fed6cfedbf599 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/1e178f98185a4f16955fed6cfedbf599 2024-11-20T22:24:14,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/e67bd58daf08449c891257e15d50766f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/e67bd58daf08449c891257e15d50766f 2024-11-20T22:24:14,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57193da5ba7640b9b5b7aca44bc55cb4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/57193da5ba7640b9b5b7aca44bc55cb4 2024-11-20T22:24:14,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/aa803960144f47efbcd5be510dc3b7f5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/aa803960144f47efbcd5be510dc3b7f5 2024-11-20T22:24:14,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/7dea808925ee4c1ebb1339f4e32c50a1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/7dea808925ee4c1ebb1339f4e32c50a1 2024-11-20T22:24:14,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ba04fe4f6a0646dbb8a4e0720269790f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/ba04fe4f6a0646dbb8a4e0720269790f 2024-11-20T22:24:14,047 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9
2024-11-20T22:24:14,073 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/recovered.edits/466.seqid, newMaxSeqId=466, maxSeqId=1
2024-11-20T22:24:14,077 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.
2024-11-20T22:24:14,077 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 175bc25ef8aacc6207ddcddcc7da4d90:
2024-11-20T22:24:14,084 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 175bc25ef8aacc6207ddcddcc7da4d90
2024-11-20T22:24:14,085 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=175bc25ef8aacc6207ddcddcc7da4d90, regionState=CLOSED
2024-11-20T22:24:14,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34
2024-11-20T22:24:14,093 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 175bc25ef8aacc6207ddcddcc7da4d90, server=6365a1e51efd,46811,1732141422048 in 1.9160 sec
2024-11-20T22:24:14,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33
2024-11-20T22:24:14,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=175bc25ef8aacc6207ddcddcc7da4d90, UNASSIGN in 1.9220 sec
2024-11-20T22:24:14,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32
2024-11-20T22:24:14,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9390 sec
2024-11-20T22:24:14,105 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141454105"}]},"ts":"1732141454105"}
2024-11-20T22:24:14,108 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta
2024-11-20T22:24:14,152 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED
2024-11-20T22:24:14,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0290 sec
2024-11-20T22:24:14,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if
procedure is done pid=32 2024-11-20T22:24:14,238 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T22:24:14,242 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:24:14,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,246 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,248 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T22:24:14,251 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:14,254 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/recovered.edits] 2024-11-20T22:24:14,257 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8d3f74ac04d147ae82fddc00f99f94ab to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/8d3f74ac04d147ae82fddc00f99f94ab 2024-11-20T22:24:14,259 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/ba1d989ba47a4c3fabf2483e48bab32c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/ba1d989ba47a4c3fabf2483e48bab32c 2024-11-20T22:24:14,260 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e71a62e5339b4371b239d1a62be7ead0 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/A/e71a62e5339b4371b239d1a62be7ead0 2024-11-20T22:24:14,262 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/53da575dc9334e02b11cc2c4d060bfe5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/53da575dc9334e02b11cc2c4d060bfe5 2024-11-20T22:24:14,264 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/57bf018e9704462d8534ed9256524e7d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/57bf018e9704462d8534ed9256524e7d 2024-11-20T22:24:14,265 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5781ee3069a44ff81d7de5ef24cb47c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/B/e5781ee3069a44ff81d7de5ef24cb47c 2024-11-20T22:24:14,267 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/472f0d5f768748c0a1fc622adde7b2e7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/472f0d5f768748c0a1fc622adde7b2e7 2024-11-20T22:24:14,268 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/55354124847e4149bfd42a987516b913 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/55354124847e4149bfd42a987516b913 2024-11-20T22:24:14,270 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dc131230963c4619b23e5bdf92c21886 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/dc131230963c4619b23e5bdf92c21886 2024-11-20T22:24:14,272 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/recovered.edits/466.seqid to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/recovered.edits/466.seqid 
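The HFileArchiver lines above show each store file being moved out of the table's data directory into the parallel archive tree: a path of the form <root>/data/default/<table>/<region>/<cf>/<hfile> becomes <root>/archive/data/default/<table>/<region>/<cf>/<hfile>. Below is a minimal sketch of that path rewrite using Hadoop's Path class; the helper name and structure are illustrative only and are not HBase's internal HFileArchiver API.

```java
import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {
  // Illustrative only: mirrors the data/ -> archive/data/ rewrite visible in the
  // backup.HFileArchiver log records above.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // e.g. rootDir   = hdfs://host:port/user/jenkins/test-data/<id>
    //      storeFile = <rootDir>/data/default/TestAcidGuarantees/<region>/C/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // "data/default/..."
    return new Path(new Path(rootDir, "archive"), relative); // "<rootDir>/archive/data/default/..."
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72");
    Path file = new Path(root,
        "data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90/C/f0529539b85d46a883905abebc1ae9c9");
    System.out.println(toArchivePath(root, file));
  }
}
```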
2024-11-20T22:24:14,273 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/175bc25ef8aacc6207ddcddcc7da4d90 2024-11-20T22:24:14,273 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:24:14,277 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T22:24:14,284 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:24:14,312 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:24:14,313 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,313 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:24:14,313 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141454313"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:14,316 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:24:14,316 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 175bc25ef8aacc6207ddcddcc7da4d90, NAME => 'TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:24:14,316 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T22:24:14,316 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141454316"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:14,318 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:24:14,327 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,329 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-11-20T22:24:14,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T22:24:14,349 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-20T22:24:14,360 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 220) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1365377924_22 at /127.0.0.1:60948 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;6365a1e51efd:46811-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1010 (was 710) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1603 (was 2578) 2024-11-20T22:24:14,369 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=1010, ProcessCount=11, AvailableMemoryMB=1603 2024-11-20T22:24:14,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:24:14,371 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:24:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:14,372 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:24:14,372 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:14,372 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-11-20T22:24:14,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T22:24:14,373 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:24:14,379 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741938_1114 (size=963) 2024-11-20T22:24:14,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T22:24:14,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T22:24:14,783 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:24:14,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741939_1115 (size=53) 2024-11-20T22:24:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T22:24:15,194 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:15,194 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing fe73e78f2490c46e0778d445404a6f5f, disabling compactions & flushes 2024-11-20T22:24:15,194 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,195 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,195 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
after waiting 0 ms 2024-11-20T22:24:15,195 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,195 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,195 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:15,196 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:24:15,196 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141455196"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141455196"}]},"ts":"1732141455196"} 2024-11-20T22:24:15,198 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T22:24:15,199 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:24:15,199 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141455199"}]},"ts":"1732141455199"} 2024-11-20T22:24:15,201 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:24:15,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, ASSIGN}] 2024-11-20T22:24:15,220 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, ASSIGN 2024-11-20T22:24:15,221 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:24:15,371 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:15,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:24:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T22:24:15,525 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin 
connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:15,529 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,529 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:24:15,529 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,530 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:15,530 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,530 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,535 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,536 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:15,537 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe73e78f2490c46e0778d445404a6f5f columnFamilyName A 2024-11-20T22:24:15,537 DEBUG [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:15,537 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(327): Store=fe73e78f2490c46e0778d445404a6f5f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
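The create request logged at 22:24:14,371 declares column families A, B and C plus the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', and the StoreOpener lines above confirm each store comes up as a CompactingMemStore with compactor=ADAPTIVE. A hedged sketch of building an equivalent descriptor with the public client API; the per-family setInMemoryCompaction call is the usual way to request a compacting memstore, while the raw table attribute mirrors what this test sets:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Raw table attribute, as set in the logged create request; the region
              // server then instantiates CompactingMemStore with compactor=ADAPTIVE.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                                       // VERSIONS => '1' in the log
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)  // per-family alternative
                .build());
      }
      admin.createTable(table.build());  // master runs the CreateTableProcedure logged above
    }
  }
}
```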
2024-11-20T22:24:15,538 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,539 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:15,540 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe73e78f2490c46e0778d445404a6f5f columnFamilyName B 2024-11-20T22:24:15,540 DEBUG [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:15,540 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(327): Store=fe73e78f2490c46e0778d445404a6f5f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:15,541 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,542 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:15,542 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe73e78f2490c46e0778d445404a6f5f columnFamilyName C 2024-11-20T22:24:15,542 DEBUG [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:15,543 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(327): Store=fe73e78f2490c46e0778d445404a6f5f/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:15,543 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,544 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,544 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,546 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:24:15,547 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:15,549 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:24:15,549 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened fe73e78f2490c46e0778d445404a6f5f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72071158, jitterRate=0.07394394278526306}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:24:15,550 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:15,551 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., pid=39, masterSystemTime=1732141455524 2024-11-20T22:24:15,553 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:15,553 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
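The earlier hbase.ResourceChecker block ("Potentially hanging thread: …", "Thread=239 (was 220)") is essentially a before/after inventory of live threads and their stacks. For reference, a tiny JDK-only sketch of taking such a snapshot; this is not HBase's ResourceChecker implementation, only an illustration of the same idea:

```java
import java.util.Map;

public class ThreadSnapshotSketch {
  public static void main(String[] args) {
    // Capture every live thread plus its current stack trace: the raw data a
    // resource checker can diff before and after a test to spot leaked threads.
    Map<Thread, StackTraceElement[]> threads = Thread.getAllStackTraces();
    System.out.println("Live threads: " + threads.size());
    threads.forEach((thread, stack) -> {
      System.out.println(thread.getName() + " (" + thread.getState() + ")");
      for (StackTraceElement frame : stack) {
        System.out.println("    at " + frame);
      }
    });
  }
}
```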
2024-11-20T22:24:15,553 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:15,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-20T22:24:15,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 in 182 msec 2024-11-20T22:24:15,558 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-20T22:24:15,558 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, ASSIGN in 338 msec 2024-11-20T22:24:15,558 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:24:15,559 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141455558"}]},"ts":"1732141455558"} 2024-11-20T22:24:15,560 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:24:15,636 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:24:15,638 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2650 sec 2024-11-20T22:24:16,187 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T22:24:16,190 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T22:24:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T22:24:16,482 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-11-20T22:24:16,487 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f5f7848 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46e36c93 2024-11-20T22:24:16,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25ba7cde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:16,496 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:16,499 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:48264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:16,501 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:24:16,503 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:24:16,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:24:16,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:24:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:16,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741940_1116 (size=999) 
2024-11-20T22:24:16,928 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T22:24:16,929 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T22:24:16,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:24:16,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, REOPEN/MOVE}] 2024-11-20T22:24:16,943 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, REOPEN/MOVE 2024-11-20T22:24:16,944 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:16,945 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:24:16,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:24:17,097 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,098 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,098 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:24:17,098 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing fe73e78f2490c46e0778d445404a6f5f, disabling compactions & flushes 2024-11-20T22:24:17,098 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,098 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,098 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
after waiting 0 ms 2024-11-20T22:24:17,098 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,105 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T22:24:17,106 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,106 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:17,106 WARN [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: fe73e78f2490c46e0778d445404a6f5f to self. 2024-11-20T22:24:17,108 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,109 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=CLOSED 2024-11-20T22:24:17,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T22:24:17,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 in 165 msec 2024-11-20T22:24:17,113 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, REOPEN/MOVE; state=CLOSED, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=true 2024-11-20T22:24:17,264 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:24:17,418 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,421 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:17,421 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:24:17,421 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,421 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:17,422 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,422 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,424 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,424 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:17,429 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe73e78f2490c46e0778d445404a6f5f columnFamilyName A 2024-11-20T22:24:17,431 DEBUG [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:17,432 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(327): Store=fe73e78f2490c46e0778d445404a6f5f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:17,432 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,433 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:17,433 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe73e78f2490c46e0778d445404a6f5f columnFamilyName B 2024-11-20T22:24:17,434 DEBUG [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:17,434 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(327): Store=fe73e78f2490c46e0778d445404a6f5f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:17,434 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,435 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:17,436 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe73e78f2490c46e0778d445404a6f5f columnFamilyName C 2024-11-20T22:24:17,436 DEBUG [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:17,436 INFO [StoreOpener-fe73e78f2490c46e0778d445404a6f5f-1 {}] regionserver.HStore(327): Store=fe73e78f2490c46e0778d445404a6f5f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:17,437 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,437 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,438 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,440 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:24:17,442 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,443 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened fe73e78f2490c46e0778d445404a6f5f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60020154, jitterRate=-0.10563001036643982}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:24:17,446 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:17,447 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., pid=44, masterSystemTime=1732141457418 2024-11-20T22:24:17,449 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,449 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:17,449 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=OPEN, openSeqNum=5, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-11-20T22:24:17,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 in 186 msec 2024-11-20T22:24:17,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-20T22:24:17,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, REOPEN/MOVE in 511 msec 2024-11-20T22:24:17,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-20T22:24:17,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 527 msec 2024-11-20T22:24:17,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 950 msec 2024-11-20T22:24:17,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T22:24:17,476 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x299dc956 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d692075 2024-11-20T22:24:17,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fd89dfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,550 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1147c8c4 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7850c240 2024-11-20T22:24:17,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@749bbca0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,563 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65069e2f to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@56071a84 2024-11-20T22:24:17,569 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b3f83a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,571 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bb3648 to 
127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@13c3a5f2 2024-11-20T22:24:17,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e0bee68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5fc07fbc to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39e05932 2024-11-20T22:24:17,585 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@280988c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,587 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x466910ad to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22c29f60 2024-11-20T22:24:17,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51e4cf50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d20fc0f to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4baf4554 2024-11-20T22:24:17,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c469d3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,604 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26ebd463 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@175fb00f 2024-11-20T22:24:17,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ff870b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,613 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37126f6a to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d277e4f 2024-11-20T22:24:17,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@186d094, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:17,626 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:17,627 DEBUG [hconnection-0x76acf04a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-20T22:24:17,628 DEBUG [hconnection-0x1b8bee3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,629 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:17,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:17,629 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:17,630 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:17,630 DEBUG [hconnection-0x12389a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,630 DEBUG [hconnection-0x19485bb6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,631 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,631 DEBUG [hconnection-0x139d6c1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,631 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,631 DEBUG [hconnection-0x4b4db09-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,631 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48286, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,632 DEBUG [hconnection-0x47952b3f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,632 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,633 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T22:24:17,634 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48304, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,638 DEBUG [hconnection-0x7d963984-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,639 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,641 DEBUG [hconnection-0x1802c99f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:17,642 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,642 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:17,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:17,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a2869cc01de04abb96199fb8c448cd44_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141457641/Put/seqid=0 2024-11-20T22:24:17,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741941_1117 (size=19474) 2024-11-20T22:24:17,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141517714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,722 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:17,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141517717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141517717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,727 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a2869cc01de04abb96199fb8c448cd44_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2869cc01de04abb96199fb8c448cd44_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:17,729 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/d89b3fe52411476a8255c539f4e79bf9, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:17,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141517721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:17,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141517721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/d89b3fe52411476a8255c539f4e79bf9 is 175, key is test_row_0/A:col10/1732141457641/Put/seqid=0 2024-11-20T22:24:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741942_1118 (size=56733) 2024-11-20T22:24:17,761 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/d89b3fe52411476a8255c539f4e79bf9 2024-11-20T22:24:17,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:17,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:17,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:17,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:17,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:17,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/7b77c8a01986494a933af2ed266ddda8 is 50, key is test_row_0/B:col10/1732141457641/Put/seqid=0 2024-11-20T22:24:17,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141517823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141517826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141517826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141517833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141517840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741943_1119 (size=12001) 2024-11-20T22:24:17,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/7b77c8a01986494a933af2ed266ddda8 2024-11-20T22:24:17,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/90460e659e52439cb9dfcbd8b514f49d is 50, key is test_row_0/C:col10/1732141457641/Put/seqid=0 2024-11-20T22:24:17,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:17,936 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:17,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741944_1120 (size=12001) 2024-11-20T22:24:17,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:17,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:17,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:17,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:17,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
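The repeated "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources: once a region's memstore grows past its blocking threshold (the configured flush size multiplied by the block multiplier), new mutations are refused with RegionTooBusyException until MemStoreFlusher catches up. Below is a minimal configuration sketch of the two settings involved, not part of the captured log; the 128 KB flush size is a hypothetical test-sized value, chosen only because 128 KB x 4 matches the 512.0 K limit reported here (the shipped defaults are 128 MB and 4).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Hypothetical test-sized flush threshold (the default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are rejected with RegionTooBusyException once the region's memstore
        // reaches flush.size * block.multiplier, i.e. 128 KB * 4 = 512 KB for the
        // values assumed here, matching the "Over memstore limit=512.0 K" messages.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + blocking + " bytes");
    }
}
```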
2024-11-20T22:24:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
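Each rejected Mutate above surfaces to the test's writer threads as org.apache.hadoop.hbase.RegionTooBusyException. The HBase client normally retries this on its own (bounded by hbase.client.retries.number and hbase.client.pause), so the loop below is only an illustrative sketch of the equivalent manual back-off; the table, row, and column names mirror the log, while the value, attempt cap, and pause are arbitrary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    // May be refused while the region's memstore is over its blocking limit.
                    // In practice the client's retry layer may surface the failure wrapped in a
                    // retries-exhausted exception; it is caught directly here only for clarity.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    if (++attempts >= 10) {          // arbitrary cap for the sketch
                        throw busy;
                    }
                    Thread.sleep(100L * attempts);   // simple linear back-off
                }
            }
        }
    }
}
```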
2024-11-20T22:24:17,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/90460e659e52439cb9dfcbd8b514f49d 2024-11-20T22:24:17,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/d89b3fe52411476a8255c539f4e79bf9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9 2024-11-20T22:24:17,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9, entries=300, sequenceid=15, filesize=55.4 K 2024-11-20T22:24:17,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/7b77c8a01986494a933af2ed266ddda8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/7b77c8a01986494a933af2ed266ddda8 2024-11-20T22:24:17,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/7b77c8a01986494a933af2ed266ddda8, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:24:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/90460e659e52439cb9dfcbd8b514f49d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/90460e659e52439cb9dfcbd8b514f49d 2024-11-20T22:24:17,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/90460e659e52439cb9dfcbd8b514f49d, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T22:24:17,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for fe73e78f2490c46e0778d445404a6f5f in 329ms, sequenceid=15, compaction requested=false 2024-11-20T22:24:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:24:17,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:18,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:18,033 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:24:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:18,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141518042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141518044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141518048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141518048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141518050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203a36d23cfc4645a3a24a202b1dff1487_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141457713/Put/seqid=0 2024-11-20T22:24:18,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741945_1121 (size=14594) 2024-11-20T22:24:18,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141518151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141518151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141518158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141518158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:18,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:18,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:18,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141518359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141518365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141518358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141518365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141518364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:18,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:18,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
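The pid=46 dispatches above show the master re-sending its flush procedure to 6365a1e51efd,46811 and the region server rejecting it with "Unable to complete flush ... as already flushing" while MemStoreFlusher.0 still owns the region's flush. A flush procedure like this is typically kicked off by an admin flush request; the sketch below assumes a plain Admin call rather than whatever the test harness actually invokes.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table. In this log the request is
            // driven by a master procedure that dispatches a FlushRegionCallable to the
            // region server; while a flush is already running, the server rejects the
            // callable and the master keeps retrying it (the repeated pid=46 entries).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```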
2024-11-20T22:24:18,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,481 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:18,485 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203a36d23cfc4645a3a24a202b1dff1487_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203a36d23cfc4645a3a24a202b1dff1487_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:18,488 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/923977962c2540c0949aa05c372878d4, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:18,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/923977962c2540c0949aa05c372878d4 is 175, key is test_row_0/A:col10/1732141457713/Put/seqid=0 2024-11-20T22:24:18,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741946_1122 (size=39549) 2024-11-20T22:24:18,514 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/923977962c2540c0949aa05c372878d4 2024-11-20T22:24:18,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/652f70bf8a00436697b250e200585750 is 50, key is test_row_0/B:col10/1732141457713/Put/seqid=0 2024-11-20T22:24:18,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:18,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
as already flushing 2024-11-20T22:24:18,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741947_1123 (size=12001) 2024-11-20T22:24:18,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/652f70bf8a00436697b250e200585750 2024-11-20T22:24:18,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/9cf902f7b93e4c29a98ea5e23d8dc644 is 50, key is test_row_0/C:col10/1732141457713/Put/seqid=0 2024-11-20T22:24:18,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741948_1124 (size=12001) 2024-11-20T22:24:18,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/9cf902f7b93e4c29a98ea5e23d8dc644 2024-11-20T22:24:18,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/923977962c2540c0949aa05c372878d4 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4 2024-11-20T22:24:18,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4, entries=200, sequenceid=42, filesize=38.6 K 2024-11-20T22:24:18,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/652f70bf8a00436697b250e200585750 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/652f70bf8a00436697b250e200585750 2024-11-20T22:24:18,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/652f70bf8a00436697b250e200585750, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T22:24:18,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/9cf902f7b93e4c29a98ea5e23d8dc644 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/9cf902f7b93e4c29a98ea5e23d8dc644 2024-11-20T22:24:18,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/9cf902f7b93e4c29a98ea5e23d8dc644, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T22:24:18,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for fe73e78f2490c46e0778d445404a6f5f in 623ms, sequenceid=42, compaction requested=false 2024-11-20T22:24:18,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:18,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:18,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:18,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:18,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:18,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:18,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:18,675 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:18,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:18,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:18,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:18,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141518705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141518708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141518709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141518708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f8a14f8ff5b4c96abbfe8562c71344b_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:18,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:18,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141518814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141518815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141518815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141518815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741949_1125 (size=14594) 2024-11-20T22:24:18,867 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:18,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:18,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:18,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:18,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:18,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141518869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,020 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:19,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
as already flushing 2024-11-20T22:24:19,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,022 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141519022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141519023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141519023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141519023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:19,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:19,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
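The RegionTooBusyException entries above are the region blocking writes once the memstore crosses its blocking limit (hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K figure here suggests the test configures a deliberately small flush size). The HBase client normally retries this exception on its own; the sketch below only makes the backoff explicit for illustration, and the names BackoffPutExample and putWithBackoff are illustrative rather than anything from the test.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {

      // Retry a single Put with exponential backoff when the region reports
      // RegionTooBusyException (memstore above the blocking limit).
      static void putWithBackoff(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e;                      // give up and surface the exception
            }
            Thread.sleep(sleepMs);          // wait for the flush to drain the memstore
            sleepMs = Math.min(sleepMs * 2, 5_000);
          }
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithBackoff(table, put, 10);
        }
      }
    }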
2024-11-20T22:24:19,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,240 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:19,244 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f8a14f8ff5b4c96abbfe8562c71344b_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f8a14f8ff5b4c96abbfe8562c71344b_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:19,246 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/86ed06785e394c1f96f8c6506cbc02ec, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:19,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/86ed06785e394c1f96f8c6506cbc02ec is 175, key is test_row_0/A:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741950_1126 (size=39549) 2024-11-20T22:24:19,254 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/86ed06785e394c1f96f8c6506cbc02ec 2024-11-20T22:24:19,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/3b21c933398f4fd984680db51f31c54e is 50, key is test_row_0/B:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:19,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741951_1127 (size=12001) 2024-11-20T22:24:19,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/3b21c933398f4fd984680db51f31c54e 2024-11-20T22:24:19,328 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4d23665fcced425ca69cf1f067af60c0 is 50, key is test_row_0/C:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:19,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:19,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:19,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:19,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141519329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141519332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141519332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141519331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741952_1128 (size=12001) 2024-11-20T22:24:19,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4d23665fcced425ca69cf1f067af60c0 2024-11-20T22:24:19,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/86ed06785e394c1f96f8c6506cbc02ec as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec 2024-11-20T22:24:19,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec, entries=200, sequenceid=54, filesize=38.6 K 2024-11-20T22:24:19,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/3b21c933398f4fd984680db51f31c54e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/3b21c933398f4fd984680db51f31c54e 2024-11-20T22:24:19,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/3b21c933398f4fd984680db51f31c54e, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T22:24:19,389 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4d23665fcced425ca69cf1f067af60c0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4d23665fcced425ca69cf1f067af60c0 2024-11-20T22:24:19,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4d23665fcced425ca69cf1f067af60c0, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T22:24:19,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fe73e78f2490c46e0778d445404a6f5f in 723ms, sequenceid=54, compaction requested=true 2024-11-20T22:24:19,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:19,398 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:19,398 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:19,400 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 135831 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:19,400 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:19,401 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
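The "in ratio" messages from ExploringCompactionPolicy above refer to a size-ratio check over the candidate window of store files. The sketch below illustrates that idea only and is not the actual policy implementation; the 12001-byte sizes mirror the three B-family flush files above, and 1.2 is HBase's usual default for hbase.hstore.compaction.ratio.

    import java.util.List;

    // Simplified size-ratio check over a candidate window of store-file sizes:
    // every file must be no larger than `ratio` times the combined size of the
    // other files, otherwise the window is not "in ratio".
    public class RatioCheckSketch {

      static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes roughly matching the three B-family flush files above (~11.7 K each).
        List<Long> window = List.of(12001L, 12001L, 12001L);
        System.out.println("window in ratio: " + inRatio(window, 1.2));  // prints true
      }
    }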
2024-11-20T22:24:19,401 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=132.6 K 2024-11-20T22:24:19,401 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,401 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec] 2024-11-20T22:24:19,401 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:19,401 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:19,401 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:19,402 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/7b77c8a01986494a933af2ed266ddda8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/652f70bf8a00436697b250e200585750, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/3b21c933398f4fd984680db51f31c54e] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.2 K 2024-11-20T22:24:19,402 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d89b3fe52411476a8255c539f4e79bf9, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141457637 2024-11-20T22:24:19,402 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 923977962c2540c0949aa05c372878d4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141457676 2024-11-20T22:24:19,402 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b77c8a01986494a933af2ed266ddda8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141457641 2024-11-20T22:24:19,403 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86ed06785e394c1f96f8c6506cbc02ec, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141458041 2024-11-20T22:24:19,403 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 652f70bf8a00436697b250e200585750, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141457676 2024-11-20T22:24:19,404 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b21c933398f4fd984680db51f31c54e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141458041 2024-11-20T22:24:19,414 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:19,421 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#109 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:19,422 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/20475f88f6cf4ce18c52c22125174158 is 50, key is test_row_0/B:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:19,438 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120cdab7076109b471d8a70ffcd7033dce3_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:19,443 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120cdab7076109b471d8a70ffcd7033dce3_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:19,444 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cdab7076109b471d8a70ffcd7033dce3_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:19,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741953_1129 (size=12104) 2024-11-20T22:24:19,467 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/20475f88f6cf4ce18c52c22125174158 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/20475f88f6cf4ce18c52c22125174158 2024-11-20T22:24:19,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741954_1130 (size=4469) 2024-11-20T22:24:19,480 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#108 average throughput is 0.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:19,482 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/06380b6a8c76481db56bfce25571d2a6 is 175, key is test_row_0/A:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:19,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T22:24:19,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,484 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:24:19,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:19,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:19,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:19,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:19,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:19,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:19,501 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into 20475f88f6cf4ce18c52c22125174158(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
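The PressureAwareThroughputController lines above report how long each compaction slept to stay under the 50.00 MB/second limit. The class below is a minimal, generic rate limiter in the same spirit, not the HBase controller itself; SimpleThroughputLimiter and its methods are illustrative names.

    // Minimal sketch of write-rate throttling: track bytes written and sleep
    // whenever the observed rate would exceed the configured limit.
    public class SimpleThroughputLimiter {

      private final double maxBytesPerSecond;
      private final long startNanos = System.nanoTime();
      private long bytesWritten = 0;

      public SimpleThroughputLimiter(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
      }

      // Call after writing `bytes`; sleeps just long enough to stay under the limit.
      public synchronized void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsNeeded = bytesWritten / maxBytesPerSecond;
        if (minSecondsNeeded > elapsedSec) {
          Thread.sleep((long) ((minSecondsNeeded - elapsedSec) * 1000));
        }
      }

      public static void main(String[] args) throws InterruptedException {
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // 50 MB/s
        byte[] block = new byte[64 * 1024];
        for (int i = 0; i < 1000; i++) {
          // ... write `block` to the destination here ...
          limiter.control(block.length);
        }
      }
    }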
2024-11-20T22:24:19,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:19,502 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141459398; duration=0sec 2024-11-20T22:24:19,502 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:19,502 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:19,502 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:19,506 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:19,506 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:19,507 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:19,507 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/90460e659e52439cb9dfcbd8b514f49d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/9cf902f7b93e4c29a98ea5e23d8dc644, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4d23665fcced425ca69cf1f067af60c0] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.2 K 2024-11-20T22:24:19,507 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 90460e659e52439cb9dfcbd8b514f49d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141457641 2024-11-20T22:24:19,508 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cf902f7b93e4c29a98ea5e23d8dc644, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141457676 2024-11-20T22:24:19,508 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d23665fcced425ca69cf1f067af60c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141458041 2024-11-20T22:24:19,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is 
added to blk_1073741955_1131 (size=31058) 2024-11-20T22:24:19,534 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#110 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:19,535 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d1e5f8de04814c2180c7b73e586adc11 is 50, key is test_row_0/C:col10/1732141458672/Put/seqid=0 2024-11-20T22:24:19,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207ab0e0624cc440e0b94453a76db73564_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141458702/Put/seqid=0 2024-11-20T22:24:19,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741956_1132 (size=12104) 2024-11-20T22:24:19,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741957_1133 (size=12154) 2024-11-20T22:24:19,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:19,618 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207ab0e0624cc440e0b94453a76db73564_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207ab0e0624cc440e0b94453a76db73564_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:19,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/5e1506ac8a864defb15976dc7ceab9f6, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:19,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/5e1506ac8a864defb15976dc7ceab9f6 is 175, key is test_row_0/A:col10/1732141458702/Put/seqid=0 2024-11-20T22:24:19,638 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:24:19,656 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741958_1134 (size=30955) 2024-11-20T22:24:19,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:19,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:19,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:19,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141519851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141519853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141519853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141519854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141519884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,939 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/06380b6a8c76481db56bfce25571d2a6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/06380b6a8c76481db56bfce25571d2a6 2024-11-20T22:24:19,945 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into 06380b6a8c76481db56bfce25571d2a6(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:19,945 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:19,945 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141459398; duration=0sec 2024-11-20T22:24:19,945 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:19,945 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:19,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141519955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141519961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141519967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:19,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:19,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141519981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,016 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d1e5f8de04814c2180c7b73e586adc11 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d1e5f8de04814c2180c7b73e586adc11 2024-11-20T22:24:20,032 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into d1e5f8de04814c2180c7b73e586adc11(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
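The RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (512.0 K in this run) and the in-flight flushes and compactions catch up. Below is a minimal client-side sketch of backing off and retrying such a write; the table name and column layout mirror the log, but the retry budget and backoff values are assumptions for illustration, and a real client's built-in retry settings may already absorb this exception before it surfaces to application code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                              // assumed initial backoff
            for (int attempt = 1; attempt <= 5; attempt++) {   // assumed retry budget
                try {
                    table.put(put);                            // may be rejected while the memstore is over its limit
                    break;                                     // write accepted
                } catch (RegionTooBusyException busy) {
                    if (attempt == 5) {
                        throw busy;                            // give up after the last attempt
                    }
                    Thread.sleep(backoffMs);                   // let the flush drain the memstore
                    backoffMs *= 2;                            // exponential backoff
                }
            }
        }
    }
}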
2024-11-20T22:24:20,032 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:20,032 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141459398; duration=0sec 2024-11-20T22:24:20,032 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:20,032 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:20,057 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/5e1506ac8a864defb15976dc7ceab9f6 2024-11-20T22:24:20,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/92b7c9a25c3b4f47b0c25bf4180a0028 is 50, key is test_row_0/B:col10/1732141458702/Put/seqid=0 2024-11-20T22:24:20,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741959_1135 (size=12001) 2024-11-20T22:24:20,101 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/92b7c9a25c3b4f47b0c25bf4180a0028 2024-11-20T22:24:20,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/30140b117dda46e6a46b6a94ded72124 is 50, key is test_row_0/C:col10/1732141458702/Put/seqid=0 2024-11-20T22:24:20,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741960_1136 (size=12001) 2024-11-20T22:24:20,126 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/30140b117dda46e6a46b6a94ded72124 2024-11-20T22:24:20,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/5e1506ac8a864defb15976dc7ceab9f6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6 2024-11-20T22:24:20,145 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6, entries=150, sequenceid=78, filesize=30.2 K 2024-11-20T22:24:20,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/92b7c9a25c3b4f47b0c25bf4180a0028 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/92b7c9a25c3b4f47b0c25bf4180a0028 2024-11-20T22:24:20,153 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/92b7c9a25c3b4f47b0c25bf4180a0028, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T22:24:20,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/30140b117dda46e6a46b6a94ded72124 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/30140b117dda46e6a46b6a94ded72124 2024-11-20T22:24:20,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141520159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,164 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/30140b117dda46e6a46b6a94ded72124, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T22:24:20,165 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for fe73e78f2490c46e0778d445404a6f5f in 681ms, sequenceid=78, compaction requested=false 2024-11-20T22:24:20,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:20,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
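The flush that finishes above runs as pid=46, a FlushRegionProcedure spawned by the table-level FlushTableProcedure pid=45 whose completion is reported in the next entries. As a sketch of how such a flush can be requested through the public Admin API (the connection setup is assumed, and whether a given HBase version routes this through the master's flush procedures depends on version and configuration):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Request a flush of every region of the table. In this log the equivalent
            // work is visible as FlushTableProcedure (pid=45) driving a per-region
            // FlushRegionProcedure (pid=46, ppid=45).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}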
2024-11-20T22:24:20,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-20T22:24:20,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-20T22:24:20,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-20T22:24:20,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5370 sec 2024-11-20T22:24:20,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:24:20,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:20,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:20,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 2.5440 sec 2024-11-20T22:24:20,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:20,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:20,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:20,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:20,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:20,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da2a06ac8d364527ab7707aab82d3f40_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741961_1137 (size=12154) 2024-11-20T22:24:20,219 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:20,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141520213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141520214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141520219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,225 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da2a06ac8d364527ab7707aab82d3f40_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da2a06ac8d364527ab7707aab82d3f40_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:20,227 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/61124b6f9fd749dba1d22dc9f04294e2, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:20,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/61124b6f9fd749dba1d22dc9f04294e2 is 175, key is test_row_0/A:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741962_1138 (size=30955) 2024-11-20T22:24:20,260 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=26.8 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/61124b6f9fd749dba1d22dc9f04294e2 2024-11-20T22:24:20,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/d96be29aaf904508965a538404b59285 is 50, key is test_row_0/B:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741963_1139 (size=12001) 2024-11-20T22:24:20,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/d96be29aaf904508965a538404b59285 2024-11-20T22:24:20,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141520321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141520321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141520328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e is 50, key is test_row_0/C:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741964_1140 (size=12001) 2024-11-20T22:24:20,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e 2024-11-20T22:24:20,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/61124b6f9fd749dba1d22dc9f04294e2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2 2024-11-20T22:24:20,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2, entries=150, sequenceid=96, filesize=30.2 K 2024-11-20T22:24:20,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/d96be29aaf904508965a538404b59285 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d96be29aaf904508965a538404b59285 2024-11-20T22:24:20,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d96be29aaf904508965a538404b59285, entries=150, sequenceid=96, filesize=11.7 K 2024-11-20T22:24:20,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e 2024-11-20T22:24:20,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e, entries=150, sequenceid=96, filesize=11.7 K 2024-11-20T22:24:20,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for fe73e78f2490c46e0778d445404a6f5f in 279ms, sequenceid=96, compaction requested=true 2024-11-20T22:24:20,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:20,451 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:20,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:20,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:20,453 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:20,454 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:20,454 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:20,454 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:20,454 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/06380b6a8c76481db56bfce25571d2a6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=90.8 K 2024-11-20T22:24:20,454 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:20,454 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/06380b6a8c76481db56bfce25571d2a6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2] 2024-11-20T22:24:20,455 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06380b6a8c76481db56bfce25571d2a6, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141458041 2024-11-20T22:24:20,456 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:20,456 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:20,456 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
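The "Exploring compaction algorithm has selected 3 files of size 92968 ... with 1 in ratio" lines above reflect ExploringCompactionPolicy's size-ratio test: a candidate set is acceptable only if no single file is larger than the sum of the others multiplied by hbase.hstore.compaction.ratio (1.2 by default). A simplified, stand-alone sketch of that check follows, using placeholder sizes close to the three A-family files above; the exact per-file byte counts are not in the log, and the real policy additionally searches over candidate subsets.

public class CompactionRatioSketch {
    // Returns true when no file in the candidate set dominates the others by more
    // than the configured ratio; this mirrors the shape of the policy's ratio check,
    // not its full permutation search.
    static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) {
            total += s;
        }
        for (long s : sizes) {
            if (s > (total - s) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        long[] aFamilyFiles = {31000, 31000, 30968};          // placeholders; the log only reports the 92968-byte total
        System.out.println(filesInRatio(aFamilyFiles, 1.2));  // prints true: similarly sized files compact together
    }
}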
2024-11-20T22:24:20,456 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/20475f88f6cf4ce18c52c22125174158, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/92b7c9a25c3b4f47b0c25bf4180a0028, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d96be29aaf904508965a538404b59285] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.3 K 2024-11-20T22:24:20,456 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e1506ac8a864defb15976dc7ceab9f6, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732141458694 2024-11-20T22:24:20,457 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 20475f88f6cf4ce18c52c22125174158, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141458041 2024-11-20T22:24:20,458 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61124b6f9fd749dba1d22dc9f04294e2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141459852 2024-11-20T22:24:20,458 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 92b7c9a25c3b4f47b0c25bf4180a0028, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732141458694 2024-11-20T22:24:20,459 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d96be29aaf904508965a538404b59285, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141459852 2024-11-20T22:24:20,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:20,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 
2024-11-20T22:24:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:20,475 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:20,481 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:20,481 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/e476556b8f8d4ca9a52af901c4042204 is 50, key is test_row_0/B:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,483 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112092efda5484ee4b0da1a971beca3b5625_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:20,486 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112092efda5484ee4b0da1a971beca3b5625_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:20,486 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112092efda5484ee4b0da1a971beca3b5625_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:20,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741965_1141 (size=12207) 2024-11-20T22:24:20,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741966_1142 (size=4469) 2024-11-20T22:24:20,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112012a7aff7f3c74497815b388eabfdbcf5_fe73e78f2490c46e0778d445404a6f5f is 50, key is 
test_row_0/A:col10/1732141460217/Put/seqid=0 2024-11-20T22:24:20,516 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#117 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:20,517 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/30dea8e3df774d2397da742c12b8342b is 175, key is test_row_0/A:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,521 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/e476556b8f8d4ca9a52af901c4042204 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/e476556b8f8d4ca9a52af901c4042204 2024-11-20T22:24:20,529 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into e476556b8f8d4ca9a52af901c4042204(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:20,529 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:20,529 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141460453; duration=0sec 2024-11-20T22:24:20,529 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:20,529 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:20,529 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:20,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141520527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141520529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,532 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:20,532 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:20,532 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:20,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,532 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d1e5f8de04814c2180c7b73e586adc11, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/30140b117dda46e6a46b6a94ded72124, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.3 K 2024-11-20T22:24:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141520530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,534 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d1e5f8de04814c2180c7b73e586adc11, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141458041 2024-11-20T22:24:20,534 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 30140b117dda46e6a46b6a94ded72124, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732141458694 2024-11-20T22:24:20,535 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ab1d89f9fbe40bd8b7b2dd7fd913c0e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141459852 2024-11-20T22:24:20,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141520534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741967_1143 (size=14594) 2024-11-20T22:24:20,551 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:20,561 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112012a7aff7f3c74497815b388eabfdbcf5_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112012a7aff7f3c74497815b388eabfdbcf5_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:20,563 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ea45d439652d46ada94d6f828daa4a89, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:20,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ea45d439652d46ada94d6f828daa4a89 is 175, key is test_row_0/A:col10/1732141460217/Put/seqid=0 2024-11-20T22:24:20,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741968_1144 (size=31161) 2024-11-20T22:24:20,571 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#120 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:20,572 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/f026d1ace4cb421b9e9b7280a7a2e926 is 50, key is test_row_0/C:col10/1732141460170/Put/seqid=0 2024-11-20T22:24:20,576 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/30dea8e3df774d2397da742c12b8342b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/30dea8e3df774d2397da742c12b8342b 2024-11-20T22:24:20,584 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into 30dea8e3df774d2397da742c12b8342b(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:20,584 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:20,584 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141460451; duration=0sec 2024-11-20T22:24:20,584 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:20,584 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:20,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741969_1145 (size=39549) 2024-11-20T22:24:20,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741970_1146 (size=12207) 2024-11-20T22:24:20,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141520633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141520633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141520836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141520836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141520837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:20,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:20,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141520843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,007 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ea45d439652d46ada94d6f828daa4a89 2024-11-20T22:24:21,014 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/f026d1ace4cb421b9e9b7280a7a2e926 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f026d1ace4cb421b9e9b7280a7a2e926 2024-11-20T22:24:21,026 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into f026d1ace4cb421b9e9b7280a7a2e926(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
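[Editor's note, not part of the captured log.] The repeated `RegionTooBusyException: Over memstore limit=512.0 K` warnings above come from `HRegion.checkResources` rejecting puts while the memstore is over its blocking threshold and flushes are still draining it. The sketch below shows how a writer outside the test might back off and retry such a rejection; it is a hedged illustration only. The table, row, family, and column names are taken from the log, while the retry count and sleep times are invented for the example, and with default client settings the exception may be retried internally before it ever reaches the caller.

// Hedged sketch, assuming the exception surfaces to the caller rather than being
// absorbed by the client's internal retry policy.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;            // illustrative starting backoff, not from the test
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);              // may be rejected while the memstore is over its limit
          return;                      // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);     // give the flush/compaction time to drain the memstore
          backoffMs *= 2;              // exponential backoff between attempts
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }
}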
2024-11-20T22:24:21,026 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:21,026 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141460468; duration=0sec 2024-11-20T22:24:21,026 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:21,026 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:21,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/d0cb28dce7f245f5a87fa0f362846475 is 50, key is test_row_0/B:col10/1732141460217/Put/seqid=0 2024-11-20T22:24:21,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741971_1147 (size=12001) 2024-11-20T22:24:21,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/d0cb28dce7f245f5a87fa0f362846475 2024-11-20T22:24:21,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/b7b2c1e782414d298d9327278b29c2bb is 50, key is test_row_0/C:col10/1732141460217/Put/seqid=0 2024-11-20T22:24:21,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741972_1148 (size=12001) 2024-11-20T22:24:21,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/b7b2c1e782414d298d9327278b29c2bb 2024-11-20T22:24:21,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141521142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141521144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ea45d439652d46ada94d6f828daa4a89 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89 2024-11-20T22:24:21,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89, entries=200, sequenceid=118, filesize=38.6 K 2024-11-20T22:24:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/d0cb28dce7f245f5a87fa0f362846475 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d0cb28dce7f245f5a87fa0f362846475 2024-11-20T22:24:21,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d0cb28dce7f245f5a87fa0f362846475, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:24:21,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/b7b2c1e782414d298d9327278b29c2bb as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b7b2c1e782414d298d9327278b29c2bb 2024-11-20T22:24:21,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b7b2c1e782414d298d9327278b29c2bb, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:24:21,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 
KB/75570 for fe73e78f2490c46e0778d445404a6f5f in 709ms, sequenceid=118, compaction requested=false 2024-11-20T22:24:21,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:21,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:21,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:21,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:21,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:21,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:21,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:21,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:21,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141521397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120280150315869469fa862f086ebbd42b4_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:21,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141521404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741973_1149 (size=17184) 2024-11-20T22:24:21,453 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:21,460 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120280150315869469fa862f086ebbd42b4_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120280150315869469fa862f086ebbd42b4_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:21,461 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/38a318eee979431b89723b443d2a4f0a, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:21,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/38a318eee979431b89723b443d2a4f0a is 175, key is test_row_0/A:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:21,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741974_1150 (size=48289) 2024-11-20T22:24:21,502 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/38a318eee979431b89723b443d2a4f0a 2024-11-20T22:24:21,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141521505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141521508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f4cde6257eec4bc6955b6ebb1d7f09ed is 50, key is test_row_0/B:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:21,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741975_1151 (size=12151) 2024-11-20T22:24:21,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f4cde6257eec4bc6955b6ebb1d7f09ed 2024-11-20T22:24:21,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/a5dc15cbb1e0424aa327015a12048d2c is 50, key is test_row_0/C:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:21,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741976_1152 (size=12151) 2024-11-20T22:24:21,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/a5dc15cbb1e0424aa327015a12048d2c 2024-11-20T22:24:21,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/38a318eee979431b89723b443d2a4f0a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a 2024-11-20T22:24:21,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a, entries=250, sequenceid=137, filesize=47.2 K 
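[Editor's note, not part of the captured log.] The flushes above complete at a few tens of KB per store while writes are being blocked at a 512 K limit, which is governed by the memstore flush size and the block multiplier. The exact values this test uses are not visible in the log; the configuration sketch below uses illustrative numbers only (128 KB with a multiplier of 4 would be consistent with the logged limit).

// Hedged sketch of the relevant configuration knobs; values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
  public static Configuration smallFlushConfig() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes (RegionTooBusyException) once the memstore reaches
    // flush.size * block.multiplier bytes, until flushes catch up.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}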
2024-11-20T22:24:21,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f4cde6257eec4bc6955b6ebb1d7f09ed as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f4cde6257eec4bc6955b6ebb1d7f09ed 2024-11-20T22:24:21,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f4cde6257eec4bc6955b6ebb1d7f09ed, entries=150, sequenceid=137, filesize=11.9 K 2024-11-20T22:24:21,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/a5dc15cbb1e0424aa327015a12048d2c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a5dc15cbb1e0424aa327015a12048d2c 2024-11-20T22:24:21,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a5dc15cbb1e0424aa327015a12048d2c, entries=150, sequenceid=137, filesize=11.9 K 2024-11-20T22:24:21,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for fe73e78f2490c46e0778d445404a6f5f in 271ms, sequenceid=137, compaction requested=true 2024-11-20T22:24:21,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:21,620 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:21,623 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:21,623 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:21,623 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
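[Editor's note, not part of the captured log.] The compactions recorded here are selected automatically by the region server after each flush (ExploringCompactionPolicy picking three store files per family). For reference, the same work can be requested explicitly through the Admin API; the sketch below is a hedged illustration using the table and family names from the log, not something the test itself does.

// Hedged sketch: explicitly requesting compactions via the Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table, Bytes.toBytes("A"));   // request a compaction of family A
      admin.majorCompact(table);                  // or a major compaction of all families
    }
  }
}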
2024-11-20T22:24:21,623 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/30dea8e3df774d2397da742c12b8342b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=116.2 K 2024-11-20T22:24:21,623 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:21,623 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/30dea8e3df774d2397da742c12b8342b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a] 2024-11-20T22:24:21,624 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30dea8e3df774d2397da742c12b8342b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141459852 2024-11-20T22:24:21,625 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea45d439652d46ada94d6f828daa4a89, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141460206 2024-11-20T22:24:21,625 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38a318eee979431b89723b443d2a4f0a, keycount=250, bloomtype=ROW, size=47.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732141460491 2024-11-20T22:24:21,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:21,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:21,633 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:21,634 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:21,634 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:21,634 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:21,635 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/e476556b8f8d4ca9a52af901c4042204, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d0cb28dce7f245f5a87fa0f362846475, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f4cde6257eec4bc6955b6ebb1d7f09ed] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.5 K 2024-11-20T22:24:21,636 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e476556b8f8d4ca9a52af901c4042204, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141459852 2024-11-20T22:24:21,636 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d0cb28dce7f245f5a87fa0f362846475, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141460206 2024-11-20T22:24:21,637 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f4cde6257eec4bc6955b6ebb1d7f09ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732141460529 2024-11-20T22:24:21,643 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:21,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:21,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:21,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:21,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:21,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush 
requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:21,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:24:21,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:21,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:21,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:21,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:21,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:21,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:21,655 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204709a356ba854e9382ba097158476122_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:21,657 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204709a356ba854e9382ba097158476122_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:21,658 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204709a356ba854e9382ba097158476122_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:21,666 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#127 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:21,667 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/b4d62d12ebbe4ad8a9255f6d04d08695 is 50, key is test_row_0/B:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:21,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f04700efe69453081c77f96ac51aca6_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141461395/Put/seqid=0 2024-11-20T22:24:21,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741977_1153 (size=4469) 2024-11-20T22:24:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741978_1154 (size=12459) 2024-11-20T22:24:21,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741979_1155 (size=12304) 2024-11-20T22:24:21,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141521719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141521720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141521721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141521723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T22:24:21,754 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-20T22:24:21,763 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:21,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-20T22:24:21,765 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:21,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:21,765 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:21,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:21,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141521822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141521825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141521825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141521826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:21,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:21,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141521895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,900 DEBUG [Thread-590 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., hostname=6365a1e51efd,46811,1732141422048, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:21,917 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:21,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:21,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:21,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:21,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:21,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:21,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:21,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141522025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141522029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141522031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141522040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:22,076 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:22,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:22,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:22,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,091 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#126 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:22,092 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/fb854acc94f54ff5adad14e786bc8e68 is 175, key is test_row_0/A:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:22,111 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/b4d62d12ebbe4ad8a9255f6d04d08695 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b4d62d12ebbe4ad8a9255f6d04d08695 2024-11-20T22:24:22,111 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:22,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741980_1156 (size=31413) 2024-11-20T22:24:22,126 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f04700efe69453081c77f96ac51aca6_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f04700efe69453081c77f96ac51aca6_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:22,128 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into b4d62d12ebbe4ad8a9255f6d04d08695(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:22,129 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:22,129 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141461633; duration=0sec 2024-11-20T22:24:22,129 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:22,129 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:22,129 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:22,129 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/dd0d598132e547c6a28d0b1dfdd24003, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:22,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/dd0d598132e547c6a28d0b1dfdd24003 is 175, key is test_row_0/A:col10/1732141461395/Put/seqid=0 2024-11-20T22:24:22,132 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:22,133 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:22,133 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:22,133 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f026d1ace4cb421b9e9b7280a7a2e926, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b7b2c1e782414d298d9327278b29c2bb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a5dc15cbb1e0424aa327015a12048d2c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.5 K 2024-11-20T22:24:22,133 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f026d1ace4cb421b9e9b7280a7a2e926, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732141459852 2024-11-20T22:24:22,134 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/fb854acc94f54ff5adad14e786bc8e68 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/fb854acc94f54ff5adad14e786bc8e68 2024-11-20T22:24:22,134 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b7b2c1e782414d298d9327278b29c2bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141460206 2024-11-20T22:24:22,135 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a5dc15cbb1e0424aa327015a12048d2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732141460529 2024-11-20T22:24:22,140 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into fb854acc94f54ff5adad14e786bc8e68(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:22,140 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:22,140 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141461620; duration=0sec 2024-11-20T22:24:22,141 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:22,141 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:22,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741981_1157 (size=31105) 2024-11-20T22:24:22,149 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#129 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:22,150 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/dd0d598132e547c6a28d0b1dfdd24003 2024-11-20T22:24:22,150 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/48052de9105b4584af5783eb1aad3601 is 50, key is test_row_0/C:col10/1732141460529/Put/seqid=0 2024-11-20T22:24:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741982_1158 (size=12459) 2024-11-20T22:24:22,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/0bff6b768a4b4744b6b3c34f5b5a52cb is 50, key is test_row_0/B:col10/1732141461395/Put/seqid=0 2024-11-20T22:24:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741983_1159 (size=12151) 2024-11-20T22:24:22,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/0bff6b768a4b4744b6b3c34f5b5a52cb 2024-11-20T22:24:22,237 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:22,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:22,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:22,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:22,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/853917c0221b4ae18c8463f4a6587ddc is 50, key is test_row_0/C:col10/1732141461395/Put/seqid=0 2024-11-20T22:24:22,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741984_1160 (size=12151) 2024-11-20T22:24:22,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/853917c0221b4ae18c8463f4a6587ddc 2024-11-20T22:24:22,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/dd0d598132e547c6a28d0b1dfdd24003 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003 2024-11-20T22:24:22,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003, entries=150, sequenceid=158, filesize=30.4 K 2024-11-20T22:24:22,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/0bff6b768a4b4744b6b3c34f5b5a52cb as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/0bff6b768a4b4744b6b3c34f5b5a52cb 2024-11-20T22:24:22,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/0bff6b768a4b4744b6b3c34f5b5a52cb, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T22:24:22,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/853917c0221b4ae18c8463f4a6587ddc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/853917c0221b4ae18c8463f4a6587ddc 2024-11-20T22:24:22,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/853917c0221b4ae18c8463f4a6587ddc, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T22:24:22,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for fe73e78f2490c46e0778d445404a6f5f in 672ms, sequenceid=158, 
compaction requested=false 2024-11-20T22:24:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:22,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:22,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:24:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:22,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206e77fcd0535845ff99a015bc811531f3_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141461718/Put/seqid=0 2024-11-20T22:24:22,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741985_1161 (size=12304) 2024-11-20T22:24:22,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:22,397 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:22,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:22,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:22,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141522402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141522407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141522409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141522409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141522510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141522512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141522519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141522519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,551 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:22,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:22,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:22,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,597 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/48052de9105b4584af5783eb1aad3601 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/48052de9105b4584af5783eb1aad3601 2024-11-20T22:24:22,606 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into 48052de9105b4584af5783eb1aad3601(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:22,606 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:22,606 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141461649; duration=0sec 2024-11-20T22:24:22,607 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:22,607 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:22,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:22,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:22,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:22,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141522723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141522723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141522725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:22,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141522730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,753 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:22,793 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206e77fcd0535845ff99a015bc811531f3_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206e77fcd0535845ff99a015bc811531f3_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:22,798 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c19c4893ba984affa363a521a2cd1a7a, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:22,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c19c4893ba984affa363a521a2cd1a7a is 175, key is test_row_0/A:col10/1732141461718/Put/seqid=0 2024-11-20T22:24:22,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741986_1162 (size=31105) 2024-11-20T22:24:22,849 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=177, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c19c4893ba984affa363a521a2cd1a7a 2024-11-20T22:24:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:22,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f758a7c147134fc888de6e35922ceac5 is 50, key is test_row_0/B:col10/1732141461718/Put/seqid=0 
2024-11-20T22:24:22,879 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:22,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:22,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:22,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:22,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741987_1163 (size=12151) 2024-11-20T22:24:23,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141523032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141523035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141523037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141523037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:23,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:23,199 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:23,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:23,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:23,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:23,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:23,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f758a7c147134fc888de6e35922ceac5 2024-11-20T22:24:23,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/79828ee58afc4eb8a68179aba6f1e4b3 is 50, key is test_row_0/C:col10/1732141461718/Put/seqid=0 2024-11-20T22:24:23,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741988_1164 (size=12151) 2024-11-20T22:24:23,355 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:23,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:23,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:23,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:23,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:23,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/79828ee58afc4eb8a68179aba6f1e4b3 2024-11-20T22:24:23,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c19c4893ba984affa363a521a2cd1a7a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a 2024-11-20T22:24:23,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a, entries=150, sequenceid=177, filesize=30.4 K 2024-11-20T22:24:23,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f758a7c147134fc888de6e35922ceac5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f758a7c147134fc888de6e35922ceac5 2024-11-20T22:24:23,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f758a7c147134fc888de6e35922ceac5, entries=150, sequenceid=177, filesize=11.9 K 2024-11-20T22:24:23,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/79828ee58afc4eb8a68179aba6f1e4b3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/79828ee58afc4eb8a68179aba6f1e4b3 2024-11-20T22:24:23,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/79828ee58afc4eb8a68179aba6f1e4b3, entries=150, sequenceid=177, filesize=11.9 K 2024-11-20T22:24:23,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for fe73e78f2490c46e0778d445404a6f5f in 1097ms, sequenceid=177, compaction requested=true 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:23,427 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:23,427 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:23,437 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:23,437 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:23,437 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:23,437 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b4d62d12ebbe4ad8a9255f6d04d08695, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/0bff6b768a4b4744b6b3c34f5b5a52cb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f758a7c147134fc888de6e35922ceac5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.9 K 2024-11-20T22:24:23,438 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:23,438 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:23,438 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,438 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/fb854acc94f54ff5adad14e786bc8e68, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=91.4 K 2024-11-20T22:24:23,438 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,438 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/fb854acc94f54ff5adad14e786bc8e68, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a] 2024-11-20T22:24:23,439 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b4d62d12ebbe4ad8a9255f6d04d08695, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732141460529 2024-11-20T22:24:23,443 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb854acc94f54ff5adad14e786bc8e68, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732141460529 2024-11-20T22:24:23,443 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bff6b768a4b4744b6b3c34f5b5a52cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141461394 2024-11-20T22:24:23,443 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f758a7c147134fc888de6e35922ceac5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141461704 2024-11-20T22:24:23,444 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd0d598132e547c6a28d0b1dfdd24003, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141461394 2024-11-20T22:24:23,446 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c19c4893ba984affa363a521a2cd1a7a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141461704 2024-11-20T22:24:23,471 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:23,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/fc178e457f02414d814a0049f6bb1e12 is 50, key is test_row_0/B:col10/1732141461718/Put/seqid=0 2024-11-20T22:24:23,484 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:23,508 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208643cb1d803d4e76ab685597d6b80018_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:23,510 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208643cb1d803d4e76ab685597d6b80018_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:23,511 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208643cb1d803d4e76ab685597d6b80018_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:23,512 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T22:24:23,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:23,513 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:24:23,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:23,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:23,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741989_1165 (size=12561) 2024-11-20T22:24:23,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:23,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:23,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741990_1166 (size=4469) 2024-11-20T22:24:23,566 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#136 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:23,567 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/bd194e5feef74f218964f596d1b2e70a is 175, key is test_row_0/A:col10/1732141461718/Put/seqid=0 2024-11-20T22:24:23,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fe74eb33df244487af51d016de74859c_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141462400/Put/seqid=0 2024-11-20T22:24:23,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741991_1167 (size=31515) 2024-11-20T22:24:23,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141523577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141523577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141523579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141523587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,600 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/bd194e5feef74f218964f596d1b2e70a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bd194e5feef74f218964f596d1b2e70a 2024-11-20T22:24:23,613 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into bd194e5feef74f218964f596d1b2e70a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:23,613 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:23,613 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141463427; duration=0sec 2024-11-20T22:24:23,613 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:23,613 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:23,613 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:23,615 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:23,615 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:23,615 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:23,615 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/48052de9105b4584af5783eb1aad3601, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/853917c0221b4ae18c8463f4a6587ddc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/79828ee58afc4eb8a68179aba6f1e4b3] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=35.9 K 2024-11-20T22:24:23,616 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48052de9105b4584af5783eb1aad3601, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732141460529 2024-11-20T22:24:23,617 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 853917c0221b4ae18c8463f4a6587ddc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732141461394 2024-11-20T22:24:23,618 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79828ee58afc4eb8a68179aba6f1e4b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141461704 2024-11-20T22:24:23,633 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#138 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:23,633 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/00ba5c88e8cb48dc9a58d8b0b998809c is 50, key is test_row_0/C:col10/1732141461718/Put/seqid=0 2024-11-20T22:24:23,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741992_1168 (size=12304) 2024-11-20T22:24:23,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:23,643 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fe74eb33df244487af51d016de74859c_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fe74eb33df244487af51d016de74859c_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:23,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/8b693e0b5ff44a0095803714f4c9ba15, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:23,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/8b693e0b5ff44a0095803714f4c9ba15 is 175, key is test_row_0/A:col10/1732141462400/Put/seqid=0 2024-11-20T22:24:23,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741994_1170 (size=31105) 2024-11-20T22:24:23,666 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/8b693e0b5ff44a0095803714f4c9ba15 2024-11-20T22:24:23,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741993_1169 (size=12561) 2024-11-20T22:24:23,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141523694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141523695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141523700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141523699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/1098c276946b40828185da18c4792870 is 50, key is test_row_0/B:col10/1732141462400/Put/seqid=0 2024-11-20T22:24:23,715 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/00ba5c88e8cb48dc9a58d8b0b998809c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/00ba5c88e8cb48dc9a58d8b0b998809c 2024-11-20T22:24:23,725 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into 00ba5c88e8cb48dc9a58d8b0b998809c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:23,725 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:23,725 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141463427; duration=0sec 2024-11-20T22:24:23,726 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:23,726 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:23,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741995_1171 (size=12151) 2024-11-20T22:24:23,737 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/1098c276946b40828185da18c4792870 2024-11-20T22:24:23,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/f36c3898836c4f8a963a16e4ed444b86 is 50, key is test_row_0/C:col10/1732141462400/Put/seqid=0 2024-11-20T22:24:23,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741996_1172 (size=12151) 2024-11-20T22:24:23,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:23,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141523902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141523902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141523906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:23,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141523907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:23,958 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/fc178e457f02414d814a0049f6bb1e12 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/fc178e457f02414d814a0049f6bb1e12 2024-11-20T22:24:23,969 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into fc178e457f02414d814a0049f6bb1e12(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:23,969 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:23,969 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141463427; duration=0sec 2024-11-20T22:24:23,969 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:23,970 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:24,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141524213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141524215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141524227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141524227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,230 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/f36c3898836c4f8a963a16e4ed444b86 2024-11-20T22:24:24,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/8b693e0b5ff44a0095803714f4c9ba15 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15 2024-11-20T22:24:24,283 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15, entries=150, sequenceid=198, filesize=30.4 K 2024-11-20T22:24:24,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/1098c276946b40828185da18c4792870 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1098c276946b40828185da18c4792870 2024-11-20T22:24:24,295 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1098c276946b40828185da18c4792870, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T22:24:24,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/f36c3898836c4f8a963a16e4ed444b86 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f36c3898836c4f8a963a16e4ed444b86 2024-11-20T22:24:24,309 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f36c3898836c4f8a963a16e4ed444b86, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T22:24:24,311 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for fe73e78f2490c46e0778d445404a6f5f in 798ms, sequenceid=198, compaction requested=false 2024-11-20T22:24:24,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:24,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:24,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-20T22:24:24,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-20T22:24:24,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-20T22:24:24,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5650 sec 2024-11-20T22:24:24,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.5760 sec 2024-11-20T22:24:24,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:24:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:24,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:24,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:24,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:24,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:24,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:24,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:24,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203b00e44fbdb343deb38eefcdb0fc4b5a_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:24,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141524765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141524765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141524804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741997_1173 (size=17284) 2024-11-20T22:24:24,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141524807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,815 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:24,873 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203b00e44fbdb343deb38eefcdb0fc4b5a_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203b00e44fbdb343deb38eefcdb0fc4b5a_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:24,887 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/cc0f0c12ae744baba81074f8fe066bf7, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:24,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/cc0f0c12ae744baba81074f8fe066bf7 is 175, key is test_row_0/A:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:24,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141524902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141524903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141524919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741998_1174 (size=48389) 2024-11-20T22:24:24,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141524925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:24,938 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=219, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/cc0f0c12ae744baba81074f8fe066bf7 2024-11-20T22:24:24,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2b1289462086405f85d10f4e8a8e962d is 50, key is test_row_0/B:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:25,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741999_1175 (size=12151) 2024-11-20T22:24:25,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141525109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141525109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141525131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141525138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2b1289462086405f85d10f4e8a8e962d 2024-11-20T22:24:25,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141525421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d3ec7a086a0a4b968f11911041f7254c is 50, key is test_row_0/C:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:25,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141525421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742000_1176 (size=12151) 2024-11-20T22:24:25,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d3ec7a086a0a4b968f11911041f7254c 2024-11-20T22:24:25,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/cc0f0c12ae744baba81074f8fe066bf7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7 2024-11-20T22:24:25,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141525442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141525444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7, entries=250, sequenceid=219, filesize=47.3 K 2024-11-20T22:24:25,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2b1289462086405f85d10f4e8a8e962d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2b1289462086405f85d10f4e8a8e962d 2024-11-20T22:24:25,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2b1289462086405f85d10f4e8a8e962d, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T22:24:25,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d3ec7a086a0a4b968f11911041f7254c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d3ec7a086a0a4b968f11911041f7254c 2024-11-20T22:24:25,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d3ec7a086a0a4b968f11911041f7254c, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T22:24:25,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for fe73e78f2490c46e0778d445404a6f5f in 737ms, sequenceid=219, compaction requested=true 2024-11-20T22:24:25,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:25,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:25,464 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:25,464 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:25,464 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:25,466 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:25,466 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111009 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:25,466 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:25,466 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:25,466 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:25,466 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
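The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above refer to the size-ratio test applied when picking store files to compact: a candidate set is acceptable only if no single file is larger than the configured ratio times the combined size of the other files in the set. Below is a minimal Java sketch of that test, assuming the default ratio of 1.2 (hbase.hstore.compaction.ratio); the class name and the byte split of the three B-store files are illustrative, only their 36863-byte total is taken from the log.

import java.util.List;

public class CompactionRatioSketch {
    // Default value of hbase.hstore.compaction.ratio.
    static final double DEFAULT_RATIO = 1.2;

    // A candidate set passes when no single file exceeds ratio * (sum of the other files).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three files summing to the 36863 bytes reported for the B-store selection above.
        System.out.println(filesInRatio(List.of(12561L, 12151L, 12151L), DEFAULT_RATIO)); // prints true
    }
}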
2024-11-20T22:24:25,466 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bd194e5feef74f218964f596d1b2e70a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=108.4 K 2024-11-20T22:24:25,466 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/fc178e457f02414d814a0049f6bb1e12, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1098c276946b40828185da18c4792870, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2b1289462086405f85d10f4e8a8e962d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=36.0 K 2024-11-20T22:24:25,466 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:25,466 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bd194e5feef74f218964f596d1b2e70a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7] 2024-11-20T22:24:25,466 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fc178e457f02414d814a0049f6bb1e12, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141461704 2024-11-20T22:24:25,467 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd194e5feef74f218964f596d1b2e70a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141461704 2024-11-20T22:24:25,467 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1098c276946b40828185da18c4792870, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141462400 2024-11-20T22:24:25,468 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b693e0b5ff44a0095803714f4c9ba15, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141462400 2024-11-20T22:24:25,468 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b1289462086405f85d10f4e8a8e962d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732141463574 2024-11-20T22:24:25,468 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc0f0c12ae744baba81074f8fe066bf7, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732141463565 2024-11-20T22:24:25,478 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:25,479 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/563cdbf6b66c484aa5bfd412bf23c8a2 is 50, key is test_row_0/B:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742001_1177 (size=12663) 2024-11-20T22:24:25,493 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/563cdbf6b66c484aa5bfd412bf23c8a2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/563cdbf6b66c484aa5bfd412bf23c8a2 2024-11-20T22:24:25,495 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:25,498 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c3a7f1da4fb046d48d84fa90a6018478_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:25,500 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c3a7f1da4fb046d48d84fa90a6018478_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:25,500 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c3a7f1da4fb046d48d84fa90a6018478_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:25,500 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into 563cdbf6b66c484aa5bfd412bf23c8a2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
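The PressureAwareThroughputController lines above ("average throughput is 6.55 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second") come from the compaction throughput limiter: after each chunk of compaction output it compares the observed write rate against the configured ceiling and sleeps only when that ceiling is exceeded, which is why no sleep was needed here. The following is a minimal sketch of that control loop, assuming a simple bytes-per-elapsed-time budget; the class and method names are illustrative, not the HBase controller itself.

public class ThroughputLimiterSketch {
    private final double maxBytesPerSecond;   // e.g. 50 MB/s, as in the log
    private long bytesSinceLastCheck = 0;
    private long lastCheckNanos = System.nanoTime();

    ThroughputLimiterSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Called after writing a chunk of compacted data; sleeps only if the rate since the
    // last check exceeded the configured limit, then resets the accounting window.
    void control(long bytesWritten) throws InterruptedException {
        bytesSinceLastCheck += bytesWritten;
        double elapsedSeconds = (System.nanoTime() - lastCheckNanos) / 1e9;
        double minimumSeconds = bytesSinceLastCheck / maxBytesPerSecond;
        double sleepSeconds = minimumSeconds - elapsedSeconds;
        if (sleepSeconds > 0) {
            Thread.sleep((long) (sleepSeconds * 1000));
        }
        bytesSinceLastCheck = 0;
        lastCheckNanos = System.nanoTime();
    }
}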
2024-11-20T22:24:25,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:25,501 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141465464; duration=0sec 2024-11-20T22:24:25,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:25,502 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:25,502 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:25,503 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:25,504 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:25,504 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:25,504 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/00ba5c88e8cb48dc9a58d8b0b998809c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f36c3898836c4f8a963a16e4ed444b86, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d3ec7a086a0a4b968f11911041f7254c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=36.0 K 2024-11-20T22:24:25,505 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 00ba5c88e8cb48dc9a58d8b0b998809c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141461704 2024-11-20T22:24:25,505 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f36c3898836c4f8a963a16e4ed444b86, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732141462400 2024-11-20T22:24:25,506 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d3ec7a086a0a4b968f11911041f7254c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732141463574 2024-11-20T22:24:25,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 
is added to blk_1073742002_1178 (size=4469) 2024-11-20T22:24:25,537 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#146 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:25,538 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4bf719aa074347e1a05b5f5af2170a6f is 50, key is test_row_0/C:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:25,539 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#145 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:25,539 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/da726a3c9bdb4d2ea009521d6e8a02a0 is 175, key is test_row_0/A:col10/1732141463576/Put/seqid=0 2024-11-20T22:24:25,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742004_1180 (size=31617) 2024-11-20T22:24:25,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742003_1179 (size=12663) 2024-11-20T22:24:25,611 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/da726a3c9bdb4d2ea009521d6e8a02a0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/da726a3c9bdb4d2ea009521d6e8a02a0 2024-11-20T22:24:25,630 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4bf719aa074347e1a05b5f5af2170a6f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4bf719aa074347e1a05b5f5af2170a6f 2024-11-20T22:24:25,632 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into da726a3c9bdb4d2ea009521d6e8a02a0(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
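The recurring RegionTooBusyException / "Over memstore limit=512.0 K" records throughout this section mean the region's memstore has reached its blocking threshold, so writes are rejected until the in-flight flush (and the compactions above) bring it back down. The stock HBase client normally absorbs these by retrying with a pause (hbase.client.retries.number, hbase.client.pause); the short Java sketch below only makes that retry-with-backoff behaviour explicit. The table, row, family and qualifier names are taken from the log; the manual loop and backoff values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMillis = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore over its blocking limit; give the flush time to catch up.
                    Thread.sleep(backoffMillis);
                    backoffMillis *= 2;
                }
            }
        }
    }
}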
2024-11-20T22:24:25,632 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:25,632 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141465464; duration=0sec 2024-11-20T22:24:25,632 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:25,633 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:25,643 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into 4bf719aa074347e1a05b5f5af2170a6f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:25,643 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:25,643 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141465465; duration=0sec 2024-11-20T22:24:25,643 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:25,643 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:25,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T22:24:25,882 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-20T22:24:25,892 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-20T22:24:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:25,894 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:25,894 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:25,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:25,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:24:25,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:25,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:25,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:25,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:25,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:25,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:25,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d748393b7f704028842b676ed3e45a8c_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141464761/Put/seqid=0 2024-11-20T22:24:25,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141525936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141525937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141525945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141525948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:25,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141525951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:25,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742005_1181 (size=14794) 2024-11-20T22:24:25,990 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:26,005 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d748393b7f704028842b676ed3e45a8c_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d748393b7f704028842b676ed3e45a8c_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:26,011 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/42266aab2bb24eb88c6f958c75db7eac, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:26,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/42266aab2bb24eb88c6f958c75db7eac is 175, key is 
test_row_0/A:col10/1732141464761/Put/seqid=0 2024-11-20T22:24:26,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141526043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,047 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:26,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141526050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742006_1182 (size=39749) 2024-11-20T22:24:26,067 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=242, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/42266aab2bb24eb88c6f958c75db7eac 2024-11-20T22:24:26,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/1919a0ba08d24a438dcdd93e1869cb77 is 50, key is test_row_0/B:col10/1732141464761/Put/seqid=0 2024-11-20T22:24:26,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742007_1183 (size=12151) 2024-11-20T22:24:26,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/1919a0ba08d24a438dcdd93e1869cb77 2024-11-20T22:24:26,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/62d4b8245cda4510a9086d1f260c2768 is 50, key is test_row_0/C:col10/1732141464761/Put/seqid=0 2024-11-20T22:24:26,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742008_1184 (size=12151) 2024-11-20T22:24:26,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/62d4b8245cda4510a9086d1f260c2768 2024-11-20T22:24:26,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:26,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/42266aab2bb24eb88c6f958c75db7eac as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac 2024-11-20T22:24:26,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
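Note on the flush records above: column family A is being flushed through the MOB write path (the flusher writes a mob file under mobdir/.tmp, renames it into mobdir/data/.../A/, then commits the regular store file for A), while B and C go through the default store flusher. For orientation only, a minimal sketch of how a MOB-enabled family like A is typically declared is given below; the threshold value and the surrounding setup are assumptions for illustration, not taken from this test's actual configuration.

// Illustrative only: declaring a MOB-enabled column family similar to family A above.
// The mob threshold and connection setup are assumptions, not the test's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Cells in family A larger than the threshold are written to mob files under
      // /mobdir, and the flushed store file keeps only reference cells, which is the
      // HMobStore/DefaultMobStoreFlusher activity visible in the log above.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(4 * 1024)   // assumed threshold, for illustration only
          .build());
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}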
2024-11-20T22:24:26,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac, entries=200, sequenceid=242, filesize=38.8 K 2024-11-20T22:24:26,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/1919a0ba08d24a438dcdd93e1869cb77 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1919a0ba08d24a438dcdd93e1869cb77 2024-11-20T22:24:26,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1919a0ba08d24a438dcdd93e1869cb77, entries=150, sequenceid=242, filesize=11.9 K 2024-11-20T22:24:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/62d4b8245cda4510a9086d1f260c2768 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/62d4b8245cda4510a9086d1f260c2768 2024-11-20T22:24:26,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/62d4b8245cda4510a9086d1f260c2768, entries=150, sequenceid=242, filesize=11.9 K 2024-11-20T22:24:26,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for fe73e78f2490c46e0778d445404a6f5f in 340ms, sequenceid=242, compaction requested=false 2024-11-20T22:24:26,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:26,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:24:26,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:26,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:26,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:26,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:26,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:26,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-20T22:24:26,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f292715b3050476b8a717f39b0101a2b_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:26,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141526307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141526307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742009_1185 (size=12404) 2024-11-20T22:24:26,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:26,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
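Each RegionTooBusyException warning above is a Mutate call rejected in HRegion.checkResources() while the region memstore sits above its blocking limit; the caller is expected to back off and retry until the in-flight flush drains the memstore. The hand-rolled retry loop below is a sketch of that behaviour for illustration only; in practice the HBase client retries internally and may surface the failure wrapped in a RetriesExhaustedWithDetailsException, so catching RegionTooBusyException directly here is a simplification.

// Illustrative only: backing off on the "Over memstore limit" rejections seen above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);            // server rejects this while the region is blocked
          return;                    // accepted once the memstore drains below the limit
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);   // wait for the in-flight flush to free memstore space
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IOException("region stayed over the memstore limit");
    }
  }
}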
2024-11-20T22:24:26,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141526413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141526413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:26,511 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:26,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
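The "Over memstore limit=512.0 K" figure in these warnings is the region's blocking threshold: checkResources() rejects updates once the region memstore exceeds the configured flush size multiplied by the block multiplier. The snippet below names the two standard settings involved; the concrete values (128 KB x 4 = 512 KB) merely reproduce the arithmetic of the limit shown in the log and are an assumption, not values read from this test's configuration.

// Illustrative only: the two settings that determine the blocking threshold above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region memstore is flushed.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Updates are blocked (RegionTooBusyException) once the region memstore reaches
    // flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}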
2024-11-20T22:24:26,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,537 INFO [master/6365a1e51efd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T22:24:26,537 INFO [master/6365a1e51efd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T22:24:26,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141526616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:26,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141526619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:26,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
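The repeated pid=50 exchanges above are a master-driven flush procedure: the master dispatches a FlushRegionCallable to the region server, the server declines because the region is already flushing ("NOT flushing ... as already flushing"), the IOException travels back through reportProcedureDone, and the master re-dispatches the callable until the flush can be taken; the "Checking to see if procedure is done pid=49" lines are consistent with a client polling the parent procedure in the meantime. A flush procedure of this kind is commonly started by an admin flush request; the sketch below is an assumed illustration of such a request, with only the table name taken from the log.

// Illustrative only: issuing the kind of admin flush request that results in
// FlushRegionCallable dispatches like pid=50 above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; while a region is already
      // flushing, the server-side callable fails and the master retries it, which is
      // the pattern recorded in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}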
2024-11-20T22:24:26,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,719 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:26,771 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f292715b3050476b8a717f39b0101a2b_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f292715b3050476b8a717f39b0101a2b_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:26,773 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ed6a8de06576429e88f2119694cf553c, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:26,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ed6a8de06576429e88f2119694cf553c is 175, key is test_row_0/A:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:26,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742010_1186 (size=31205) 2024-11-20T22:24:26,821 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=259, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ed6a8de06576429e88f2119694cf553c 2024-11-20T22:24:26,830 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4f08f16d5b4c4fb5be3c7b921d445f03 is 50, key is test_row_0/B:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:26,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
as already flushing 2024-11-20T22:24:26,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742011_1187 (size=12251) 2024-11-20T22:24:26,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4f08f16d5b4c4fb5be3c7b921d445f03 2024-11-20T22:24:26,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/983911b122ae4ce1bcbc1b3f09241f39 is 50, key is test_row_0/C:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:26,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742012_1188 (size=12251) 2024-11-20T22:24:26,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/983911b122ae4ce1bcbc1b3f09241f39 2024-11-20T22:24:26,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/ed6a8de06576429e88f2119694cf553c as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c 2024-11-20T22:24:26,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c, entries=150, sequenceid=259, filesize=30.5 K 2024-11-20T22:24:26,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4f08f16d5b4c4fb5be3c7b921d445f03 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4f08f16d5b4c4fb5be3c7b921d445f03 2024-11-20T22:24:26,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4f08f16d5b4c4fb5be3c7b921d445f03, entries=150, sequenceid=259, filesize=12.0 K 2024-11-20T22:24:26,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/983911b122ae4ce1bcbc1b3f09241f39 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/983911b122ae4ce1bcbc1b3f09241f39 2024-11-20T22:24:26,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/983911b122ae4ce1bcbc1b3f09241f39, entries=150, sequenceid=259, filesize=12.0 K 2024-11-20T22:24:26,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for fe73e78f2490c46e0778d445404a6f5f in 671ms, sequenceid=259, compaction requested=true 2024-11-20T22:24:26,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:26,923 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:26,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:26,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:26,924 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102571 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:26,924 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all 
files) 2024-11-20T22:24:26,924 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,924 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/da726a3c9bdb4d2ea009521d6e8a02a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=100.2 K 2024-11-20T22:24:26,924 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,924 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/da726a3c9bdb4d2ea009521d6e8a02a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c] 2024-11-20T22:24:26,925 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting da726a3c9bdb4d2ea009521d6e8a02a0, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732141463574 2024-11-20T22:24:26,925 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42266aab2bb24eb88c6f958c75db7eac, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732141464754 2024-11-20T22:24:26,926 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed6a8de06576429e88f2119694cf553c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732141465922 2024-11-20T22:24:26,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:26,931 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:26,932 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:26,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:26,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:26,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:26,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:24:26,937 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:26,937 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:26,937 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,937 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/563cdbf6b66c484aa5bfd412bf23c8a2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1919a0ba08d24a438dcdd93e1869cb77, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4f08f16d5b4c4fb5be3c7b921d445f03] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=36.2 K 2024-11-20T22:24:26,943 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:26,951 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 563cdbf6b66c484aa5bfd412bf23c8a2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732141463574 2024-11-20T22:24:26,953 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1919a0ba08d24a438dcdd93e1869cb77, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732141464761 2024-11-20T22:24:26,959 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e2024112062dfa62f726a4be39436924346584516_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:26,961 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112062dfa62f726a4be39436924346584516_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:26,961 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112062dfa62f726a4be39436924346584516_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:26,962 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f08f16d5b4c4fb5be3c7b921d445f03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732141465922 2024-11-20T22:24:26,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:26,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:26,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:26,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:26,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:26,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:26,984 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#154 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:26,985 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/ac9d73a93dac4938a585e38fa639f277 is 50, key is test_row_0/B:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:26,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:26,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:26,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:26,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:26,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:26,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:27,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141527023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141527024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141527033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141527034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141527055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742013_1189 (size=4469) 2024-11-20T22:24:27,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209115d962e5554718b29894c29bed0994_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141466306/Put/seqid=0 2024-11-20T22:24:27,094 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#153 average throughput is 0.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:27,095 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/9d986dc846de45fab2f245c0843b3419 is 175, key is test_row_0/A:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:27,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742014_1190 (size=12865) 2024-11-20T22:24:27,129 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/ac9d73a93dac4938a585e38fa639f277 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/ac9d73a93dac4938a585e38fa639f277 2024-11-20T22:24:27,138 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into ac9d73a93dac4938a585e38fa639f277(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:27,138 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:27,139 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141466931; duration=0sec 2024-11-20T22:24:27,139 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:27,139 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:27,139 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:27,140 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:27,140 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:27,140 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:27,140 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4bf719aa074347e1a05b5f5af2170a6f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/62d4b8245cda4510a9086d1f260c2768, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/983911b122ae4ce1bcbc1b3f09241f39] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=36.2 K 2024-11-20T22:24:27,141 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bf719aa074347e1a05b5f5af2170a6f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732141463574 2024-11-20T22:24:27,141 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 62d4b8245cda4510a9086d1f260c2768, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732141464761 2024-11-20T22:24:27,142 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 983911b122ae4ce1bcbc1b3f09241f39, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732141465922 2024-11-20T22:24:27,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141527136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141527145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141527157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141527161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141527162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742016_1192 (size=31819) 2024-11-20T22:24:27,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742015_1191 (size=17534) 2024-11-20T22:24:27,177 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:27,211 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#156 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:27,212 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/1343783d310a456f8e3ce79fcdaed03e is 50, key is test_row_0/C:col10/1732141465940/Put/seqid=0 2024-11-20T22:24:27,220 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209115d962e5554718b29894c29bed0994_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209115d962e5554718b29894c29bed0994_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:27,222 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/9d986dc846de45fab2f245c0843b3419 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/9d986dc846de45fab2f245c0843b3419 2024-11-20T22:24:27,222 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c96ea94238a04a7988ff379747be9d52, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:27,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c96ea94238a04a7988ff379747be9d52 is 175, key is test_row_0/A:col10/1732141466306/Put/seqid=0 2024-11-20T22:24:27,246 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into 9d986dc846de45fab2f245c0843b3419(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:27,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:27,246 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141466922; duration=0sec 2024-11-20T22:24:27,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:27,247 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:27,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:27,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:27,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:27,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742017_1193 (size=12865) 2024-11-20T22:24:27,324 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/1343783d310a456f8e3ce79fcdaed03e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/1343783d310a456f8e3ce79fcdaed03e 2024-11-20T22:24:27,334 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into 1343783d310a456f8e3ce79fcdaed03e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:27,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742018_1194 (size=48639) 2024-11-20T22:24:27,336 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=281, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c96ea94238a04a7988ff379747be9d52 2024-11-20T22:24:27,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4c9b54ca335748299f7223b9f74c82ff is 50, key is test_row_0/B:col10/1732141466306/Put/seqid=0 2024-11-20T22:24:27,360 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:27,360 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141466932; duration=0sec 2024-11-20T22:24:27,360 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:27,360 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:27,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141527366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141527367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141527366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141527373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141527374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742019_1195 (size=12301) 2024-11-20T22:24:27,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4c9b54ca335748299f7223b9f74c82ff 2024-11-20T22:24:27,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4e2c4507b0254407ba6f6fa2006a0245 is 50, key is test_row_0/C:col10/1732141466306/Put/seqid=0 2024-11-20T22:24:27,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742020_1196 (size=12301) 2024-11-20T22:24:27,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:27,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:27,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
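The "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources: once a region's memstore grows past its blocking limit, new mutations are refused with RegionTooBusyException until the flush frees memory. In a stock deployment that limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier (defaults of 128 MB and 4 in recent releases); the 512 K figure suggests this test runs with a deliberately tiny flush size so the limit is hit quickly. The snippet below names the two knobs involved; the concrete values are assumptions chosen only to reproduce the 512 K figure, not the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-memstore flush threshold; the HBase default is 128 MB. A value this
    // small is an assumption made only to mirror the 512 K limit in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Updates are blocked once a region's memstore reaches
    // flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
```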
2024-11-20T22:24:27,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,650 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:27,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:27,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141527677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
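RegionTooBusyException is retryable: the HBase client normally backs off and retries it internally, which is consistent with the same connections reappearing above with new callIds moments later. A caller driving puts directly can also handle the condition explicitly. The sketch below is a minimal, hedged illustration using the row, family, and qualifier names from the log; it is not the test's writer code, and the backoff values are arbitrary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      while (true) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // The region's memstore is over its blocking limit; wait for the
          // flush to catch up, then try again. (Depending on retry settings,
          // the client may instead surface this wrapped in a retries-exhausted
          // IOException.)
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
    }
  }
}
```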
2024-11-20T22:24:27,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141527682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141527682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141527683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:27,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141527688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,833 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:27,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:27,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:27,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4e2c4507b0254407ba6f6fa2006a0245 2024-11-20T22:24:27,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c96ea94238a04a7988ff379747be9d52 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52 2024-11-20T22:24:27,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52, entries=250, sequenceid=281, filesize=47.5 K 2024-11-20T22:24:27,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4c9b54ca335748299f7223b9f74c82ff as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4c9b54ca335748299f7223b9f74c82ff 2024-11-20T22:24:27,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4c9b54ca335748299f7223b9f74c82ff, entries=150, sequenceid=281, filesize=12.0 K 2024-11-20T22:24:27,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4e2c4507b0254407ba6f6fa2006a0245 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4e2c4507b0254407ba6f6fa2006a0245 2024-11-20T22:24:27,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4e2c4507b0254407ba6f6fa2006a0245, entries=150, sequenceid=281, filesize=12.0 K 2024-11-20T22:24:27,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for fe73e78f2490c46e0778d445404a6f5f in 1003ms, sequenceid=281, compaction requested=false 2024-11-20T22:24:27,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:27,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:27,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T22:24:27,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:27,992 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:27,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:27,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:27,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:27,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:27,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:27,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:28,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dace773c917a44cbb8c5fae8b22523e8_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141467028/Put/seqid=0 2024-11-20T22:24:28,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742021_1197 (size=12454) 2024-11-20T22:24:28,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
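Family A in this log is MOB-enabled: its values are written to files under mobdir/ (the files HMobStore renames from mobdir/.tmp into mobdir/data above) while the regular store file keeps reference cells, which is why A flushes go through DefaultMobStoreFlusher whereas B and C use DefaultStoreFlusher. MOB is declared per column family; the sketch below shows how such a table could be created, with the threshold value chosen arbitrarily for illustration rather than taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cells larger than the MOB threshold are stored as separate MOB files
      // under mobdir/, as seen for family A in the log above.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(1024L) // bytes; arbitrary illustrative value
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(mobFamily)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build());
    }
  }
}
```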
2024-11-20T22:24:28,117 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dace773c917a44cbb8c5fae8b22523e8_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dace773c917a44cbb8c5fae8b22523e8_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:28,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/12f4b4bde5424c74bbe511eb86b066fd, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/12f4b4bde5424c74bbe511eb86b066fd is 175, key is test_row_0/A:col10/1732141467028/Put/seqid=0 2024-11-20T22:24:28,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742022_1198 (size=31255) 2024-11-20T22:24:28,150 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=299, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/12f4b4bde5424c74bbe511eb86b066fd 2024-11-20T22:24:28,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4aa5e8015d724c689a1a3cbc40c9a0f3 is 50, key is test_row_0/B:col10/1732141467028/Put/seqid=0 2024-11-20T22:24:28,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:28,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742023_1199 (size=12301) 2024-11-20T22:24:28,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141528243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141528247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141528248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141528250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141528248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141528353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141528353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141528359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141528367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141528379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141528562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141528567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141528558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141528575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141528586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,611 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4aa5e8015d724c689a1a3cbc40c9a0f3 2024-11-20T22:24:28,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/304a3537093740e6b208e3e4b1fb44f1 is 50, key is test_row_0/C:col10/1732141467028/Put/seqid=0 2024-11-20T22:24:28,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742024_1200 (size=12301) 2024-11-20T22:24:28,699 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/304a3537093740e6b208e3e4b1fb44f1 2024-11-20T22:24:28,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/12f4b4bde5424c74bbe511eb86b066fd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd 2024-11-20T22:24:28,714 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd, entries=150, sequenceid=299, filesize=30.5 K 2024-11-20T22:24:28,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/4aa5e8015d724c689a1a3cbc40c9a0f3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4aa5e8015d724c689a1a3cbc40c9a0f3 2024-11-20T22:24:28,727 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4aa5e8015d724c689a1a3cbc40c9a0f3, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T22:24:28,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/304a3537093740e6b208e3e4b1fb44f1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/304a3537093740e6b208e3e4b1fb44f1 2024-11-20T22:24:28,738 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/304a3537093740e6b208e3e4b1fb44f1, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T22:24:28,740 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for fe73e78f2490c46e0778d445404a6f5f in 748ms, sequenceid=299, compaction requested=true 2024-11-20T22:24:28,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:28,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
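Note on the repeated "Region is too busy" records above and below: HRegion.checkResources (the top frame in each stack trace) rejects the incoming Mutate calls because the memstore of region fe73e78f2490c46e0778d445404a6f5f is above its blocking limit of 512.0 K while its flushes are still completing. That limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the unusually small value suggests the test runs with a deliberately reduced flush size. The Java sketch below is illustrative commentary and not part of the captured log; the class name, attempt count and back-off values are invented, while the table (TestAcidGuarantees), family (A), qualifier (col10) and row (test_row_0) are taken from the log entries. It shows one way a caller could back off when these rejections reach it; the HBase client also retries such transient failures on its own.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper, not part of TestAcidGuarantees: writes one cell and backs off
// when the region reports it is too busy (memstore above its blocking threshold).
public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      boolean written = false;
      for (int attempt = 1; attempt <= 5 && !written; attempt++) {
        try {
          table.put(put);   // may fail with RegionTooBusyException (an IOException)
          written = true;   // while the region is over its blocking memstore limit
        } catch (IOException busy) {
          Thread.sleep(backoffMs);  // give the in-flight flush time to drain the memstore
          backoffMs *= 2;           // simple exponential back-off
        }
      }
      if (!written) {
        throw new IOException("region stayed too busy after 5 attempts");
      }
    }
  }
}

On the server side, raising hbase.hregion.memstore.block.multiplier (or the flush size itself) widens the same threshold, at the cost of letting memstores grow larger before writes are blocked.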
2024-11-20T22:24:28,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-20T22:24:28,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-20T22:24:28,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T22:24:28,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8490 sec 2024-11-20T22:24:28,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.8540 sec 2024-11-20T22:24:28,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:28,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:24:28,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:28,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:28,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:28,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:28,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:28,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:28,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141528909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141528910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141528913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141528918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205c79d245f9cf4a7fbbdfef53933e4783_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:28,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:28,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141528928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:28,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742025_1201 (size=17534) 2024-11-20T22:24:28,974 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:28,982 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205c79d245f9cf4a7fbbdfef53933e4783_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205c79d245f9cf4a7fbbdfef53933e4783_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:28,983 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/54149b9697094dd7865135b38120d7e4, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:28,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/54149b9697094dd7865135b38120d7e4 is 175, key is test_row_0/A:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:29,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742026_1202 (size=48639) 2024-11-20T22:24:29,021 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=323, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/54149b9697094dd7865135b38120d7e4 2024-11-20T22:24:29,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141529023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141529023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141529024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141529032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141529036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/876aae8822834076b33497cad042a771 is 50, key is test_row_0/B:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:29,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742027_1203 (size=12301) 2024-11-20T22:24:29,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141529231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141529235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141529233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141529241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141529242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/876aae8822834076b33497cad042a771 2024-11-20T22:24:29,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/e1b2744c66504b7784bd89ea7e39c663 is 50, key is test_row_0/C:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:29,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141529548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141529549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141529550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141529553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742028_1204 (size=12301) 2024-11-20T22:24:29,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/e1b2744c66504b7784bd89ea7e39c663 2024-11-20T22:24:29,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:29,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141529554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:29,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/54149b9697094dd7865135b38120d7e4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4 2024-11-20T22:24:29,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4, entries=250, sequenceid=323, filesize=47.5 K 2024-11-20T22:24:29,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/876aae8822834076b33497cad042a771 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/876aae8822834076b33497cad042a771 2024-11-20T22:24:29,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/876aae8822834076b33497cad042a771, entries=150, sequenceid=323, filesize=12.0 K 2024-11-20T22:24:29,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/e1b2744c66504b7784bd89ea7e39c663 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/e1b2744c66504b7784bd89ea7e39c663 2024-11-20T22:24:29,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/e1b2744c66504b7784bd89ea7e39c663, entries=150, sequenceid=323, filesize=12.0 K 2024-11-20T22:24:29,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 
KB/68700 for fe73e78f2490c46e0778d445404a6f5f in 742ms, sequenceid=323, compaction requested=true 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:29,619 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:29,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:29,620 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:29,622 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 160352 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:29,622 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:29,622 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49768 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:29,622 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:29,622 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:29,622 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:29,622 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/ac9d73a93dac4938a585e38fa639f277, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4c9b54ca335748299f7223b9f74c82ff, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4aa5e8015d724c689a1a3cbc40c9a0f3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/876aae8822834076b33497cad042a771] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=48.6 K 2024-11-20T22:24:29,622 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/9d986dc846de45fab2f245c0843b3419, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=156.6 K 2024-11-20T22:24:29,622 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:29,622 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/9d986dc846de45fab2f245c0843b3419, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4] 2024-11-20T22:24:29,622 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ac9d73a93dac4938a585e38fa639f277, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732141465922 2024-11-20T22:24:29,623 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d986dc846de45fab2f245c0843b3419, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732141465922 2024-11-20T22:24:29,623 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c9b54ca335748299f7223b9f74c82ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732141466306 2024-11-20T22:24:29,624 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4aa5e8015d724c689a1a3cbc40c9a0f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732141467021 2024-11-20T22:24:29,624 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c96ea94238a04a7988ff379747be9d52, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732141466306 2024-11-20T22:24:29,624 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 876aae8822834076b33497cad042a771, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732141468241 2024-11-20T22:24:29,625 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12f4b4bde5424c74bbe511eb86b066fd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732141467021 2024-11-20T22:24:29,625 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54149b9697094dd7865135b38120d7e4, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732141468241 2024-11-20T22:24:29,656 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#165 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:29,657 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/72e149497a054a9eb480373af3bd301f is 50, key is test_row_0/B:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:29,675 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:29,707 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202dfa9205f8064f6a847909492d160807_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:29,710 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202dfa9205f8064f6a847909492d160807_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:29,710 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202dfa9205f8064f6a847909492d160807_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:29,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742029_1205 (size=13051) 2024-11-20T22:24:29,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742030_1206 (size=4469) 2024-11-20T22:24:29,757 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/72e149497a054a9eb480373af3bd301f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/72e149497a054a9eb480373af3bd301f 2024-11-20T22:24:29,761 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#166 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:29,761 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/b47fcc50e0af46e8b29d7b53d7e64f99 is 175, key is test_row_0/A:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:29,767 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into 72e149497a054a9eb480373af3bd301f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:29,767 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:29,767 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=12, startTime=1732141469619; duration=0sec 2024-11-20T22:24:29,767 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:29,767 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:29,767 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:29,768 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49768 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:29,769 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:29,769 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:29,769 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/1343783d310a456f8e3ce79fcdaed03e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4e2c4507b0254407ba6f6fa2006a0245, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/304a3537093740e6b208e3e4b1fb44f1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/e1b2744c66504b7784bd89ea7e39c663] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=48.6 K 2024-11-20T22:24:29,769 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1343783d310a456f8e3ce79fcdaed03e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732141465922 2024-11-20T22:24:29,770 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e2c4507b0254407ba6f6fa2006a0245, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732141466306 2024-11-20T22:24:29,775 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 304a3537093740e6b208e3e4b1fb44f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732141467021 2024-11-20T22:24:29,778 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e1b2744c66504b7784bd89ea7e39c663, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732141468241 2024-11-20T22:24:29,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742031_1207 (size=32005) 2024-11-20T22:24:29,810 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#167 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:29,810 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/b47fcc50e0af46e8b29d7b53d7e64f99 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b47fcc50e0af46e8b29d7b53d7e64f99 2024-11-20T22:24:29,811 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/b30f0d86973942a4997056212afbfac8 is 50, key is test_row_0/C:col10/1732141468876/Put/seqid=0 2024-11-20T22:24:29,821 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into b47fcc50e0af46e8b29d7b53d7e64f99(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:29,822 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:29,822 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=12, startTime=1732141469619; duration=0sec 2024-11-20T22:24:29,822 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:29,822 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:29,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742032_1208 (size=13051) 2024-11-20T22:24:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T22:24:29,999 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-20T22:24:30,008 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:30,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T22:24:30,010 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=51 2024-11-20T22:24:30,012 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:30,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:30,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:30,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d32a79bbbe264b5986b9459f7b763312_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141470070/Put/seqid=0 2024-11-20T22:24:30,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141530111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141530115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141530120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141530127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141530128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742033_1209 (size=12454) 2024-11-20T22:24:30,168 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:30,177 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:30,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:30,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:30,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:30,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,207 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d32a79bbbe264b5986b9459f7b763312_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d32a79bbbe264b5986b9459f7b763312_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:30,215 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e647b5ee89b94006aab8a90883f98527, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:30,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e647b5ee89b94006aab8a90883f98527 is 175, key is test_row_0/A:col10/1732141470070/Put/seqid=0 2024-11-20T22:24:30,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141530223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141530246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141530250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141530251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141530252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742034_1210 (size=31255) 2024-11-20T22:24:30,270 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=341, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e647b5ee89b94006aab8a90883f98527 2024-11-20T22:24:30,271 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/b30f0d86973942a4997056212afbfac8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b30f0d86973942a4997056212afbfac8 2024-11-20T22:24:30,284 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into b30f0d86973942a4997056212afbfac8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:30,285 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:30,285 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=12, startTime=1732141469619; duration=0sec 2024-11-20T22:24:30,285 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:30,285 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:30,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/77f4af9e275545b395f2636064a367fc is 50, key is test_row_0/B:col10/1732141470070/Put/seqid=0 2024-11-20T22:24:30,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:30,335 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:30,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:30,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:30,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:30,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742035_1211 (size=12301) 2024-11-20T22:24:30,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/77f4af9e275545b395f2636064a367fc 2024-11-20T22:24:30,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/ab5644ae44574e92b55868a9c4f5fd45 is 50, key is test_row_0/C:col10/1732141470070/Put/seqid=0 2024-11-20T22:24:30,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141530431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141530460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141530460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141530461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742036_1212 (size=12301) 2024-11-20T22:24:30,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/ab5644ae44574e92b55868a9c4f5fd45 2024-11-20T22:24:30,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141530475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e647b5ee89b94006aab8a90883f98527 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527 2024-11-20T22:24:30,497 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:30,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:30,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:30,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527, entries=150, sequenceid=341, filesize=30.5 K 2024-11-20T22:24:30,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/77f4af9e275545b395f2636064a367fc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/77f4af9e275545b395f2636064a367fc 2024-11-20T22:24:30,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/77f4af9e275545b395f2636064a367fc, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T22:24:30,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/ab5644ae44574e92b55868a9c4f5fd45 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ab5644ae44574e92b55868a9c4f5fd45 2024-11-20T22:24:30,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ab5644ae44574e92b55868a9c4f5fd45, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T22:24:30,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for fe73e78f2490c46e0778d445404a6f5f in 460ms, sequenceid=341, compaction requested=false 2024-11-20T22:24:30,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:30,662 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T22:24:30,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:30,663 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:24:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:30,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120144ead4bea1e48c8b25c583ade9b53e1_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141470116/Put/seqid=0 2024-11-20T22:24:30,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:30,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:30,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742037_1213 (size=12454) 2024-11-20T22:24:30,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:30,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141530795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141530798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141530798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141530798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141530800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,819 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120144ead4bea1e48c8b25c583ade9b53e1_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120144ead4bea1e48c8b25c583ade9b53e1_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:30,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/120a5fa1252845138001ef8f877b2bcf, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:30,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/120a5fa1252845138001ef8f877b2bcf is 175, key is test_row_0/A:col10/1732141470116/Put/seqid=0 2024-11-20T22:24:30,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742038_1214 (size=31255) 2024-11-20T22:24:30,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141530907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141530911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141530911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:30,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:30,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141530911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:31,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141531114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141531116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141531121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141531121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,287 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=363, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/120a5fa1252845138001ef8f877b2bcf 2024-11-20T22:24:31,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/68bac55bc5ae4e5ea762b1e37ceaef1d is 50, key is test_row_0/B:col10/1732141470116/Put/seqid=0 2024-11-20T22:24:31,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141531319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742039_1215 (size=12301) 2024-11-20T22:24:31,360 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/68bac55bc5ae4e5ea762b1e37ceaef1d 2024-11-20T22:24:31,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/bff000b978914381b1a0619de0b68b58 is 50, key is test_row_0/C:col10/1732141470116/Put/seqid=0 2024-11-20T22:24:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742040_1216 (size=12301) 2024-11-20T22:24:31,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141531421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141531422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141531432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141531432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:31,827 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/bff000b978914381b1a0619de0b68b58 2024-11-20T22:24:31,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/120a5fa1252845138001ef8f877b2bcf as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf 2024-11-20T22:24:31,846 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf, entries=150, sequenceid=363, filesize=30.5 K 2024-11-20T22:24:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/68bac55bc5ae4e5ea762b1e37ceaef1d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/68bac55bc5ae4e5ea762b1e37ceaef1d 2024-11-20T22:24:31,854 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/68bac55bc5ae4e5ea762b1e37ceaef1d, entries=150, sequenceid=363, filesize=12.0 K 2024-11-20T22:24:31,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/bff000b978914381b1a0619de0b68b58 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/bff000b978914381b1a0619de0b68b58 2024-11-20T22:24:31,866 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/bff000b978914381b1a0619de0b68b58, entries=150, sequenceid=363, filesize=12.0 K 2024-11-20T22:24:31,867 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for fe73e78f2490c46e0778d445404a6f5f in 1205ms, sequenceid=363, compaction requested=true 2024-11-20T22:24:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:31,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:31,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T22:24:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T22:24:31,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T22:24:31,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8640 sec 2024-11-20T22:24:31,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.8700 sec 2024-11-20T22:24:31,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:24:31,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:31,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bec71bb475bf4cffa956572e2b076c91_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141532007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141532009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141532013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141532015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742041_1217 (size=17534) 2024-11-20T22:24:32,047 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:32,056 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bec71bb475bf4cffa956572e2b076c91_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bec71bb475bf4cffa956572e2b076c91_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:32,058 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/0c29799ef01a447f8562ca822ac10103, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:32,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/0c29799ef01a447f8562ca822ac10103 is 175, key is test_row_0/A:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742042_1218 (size=48639) 2024-11-20T22:24:32,105 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=380, memsize=31.3 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/0c29799ef01a447f8562ca822ac10103 2024-11-20T22:24:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T22:24:32,118 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T22:24:32,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141532116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f3733cd4927c4303b5a6b68a693f7f17 is 50, key is test_row_0/B:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,133 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:32,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T22:24:32,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:32,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141532127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141532127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141532128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,140 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:32,141 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:32,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:32,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742043_1219 (size=12301) 2024-11-20T22:24:32,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f3733cd4927c4303b5a6b68a693f7f17 2024-11-20T22:24:32,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d84a6e0e9eb44cdba0d5118daf7623f8 is 50, key is test_row_0/C:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742044_1220 (size=12301) 2024-11-20T22:24:32,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d84a6e0e9eb44cdba0d5118daf7623f8 2024-11-20T22:24:32,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/0c29799ef01a447f8562ca822ac10103 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103 2024-11-20T22:24:32,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:32,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103, entries=250, sequenceid=380, filesize=47.5 K 2024-11-20T22:24:32,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/f3733cd4927c4303b5a6b68a693f7f17 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f3733cd4927c4303b5a6b68a693f7f17 2024-11-20T22:24:32,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f3733cd4927c4303b5a6b68a693f7f17, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T22:24:32,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d84a6e0e9eb44cdba0d5118daf7623f8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d84a6e0e9eb44cdba0d5118daf7623f8 2024-11-20T22:24:32,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d84a6e0e9eb44cdba0d5118daf7623f8, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T22:24:32,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for fe73e78f2490c46e0778d445404a6f5f in 300ms, sequenceid=380, compaction requested=true 2024-11-20T22:24:32,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:32,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:32,265 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:32,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:32,265 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:32,267 
DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143154 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:32,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:32,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:32,267 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:32,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:32,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:32,267 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:32,267 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b47fcc50e0af46e8b29d7b53d7e64f99, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=139.8 K 2024-11-20T22:24:32,267 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:32,267 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b47fcc50e0af46e8b29d7b53d7e64f99, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103] 2024-11-20T22:24:32,267 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:32,267 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:32,267 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:32,267 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/72e149497a054a9eb480373af3bd301f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/77f4af9e275545b395f2636064a367fc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/68bac55bc5ae4e5ea762b1e37ceaef1d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f3733cd4927c4303b5a6b68a693f7f17] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=48.8 K 2024-11-20T22:24:32,268 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b47fcc50e0af46e8b29d7b53d7e64f99, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732141468241 2024-11-20T22:24:32,268 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 72e149497a054a9eb480373af3bd301f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732141468241 2024-11-20T22:24:32,268 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e647b5ee89b94006aab8a90883f98527, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141468910 2024-11-20T22:24:32,269 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 77f4af9e275545b395f2636064a367fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141468910 2024-11-20T22:24:32,269 
DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 120a5fa1252845138001ef8f877b2bcf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732141470088 2024-11-20T22:24:32,269 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 68bac55bc5ae4e5ea762b1e37ceaef1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732141470088 2024-11-20T22:24:32,269 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c29799ef01a447f8562ca822ac10103, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732141470794 2024-11-20T22:24:32,269 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f3733cd4927c4303b5a6b68a693f7f17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732141470797 2024-11-20T22:24:32,287 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:32,293 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T22:24:32,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
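The procedure entries above (pid=54, FlushRegionCallable dispatched by the master to this region server) correspond to a table flush being driven from the master side. As a point of reference, the sketch below shows how such a flush can be requested through the public HBase Admin API; the configuration source and the fact that the call waits for the procedure to finish are assumptions about a typical 2.x client setup, not something taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; in recent HBase
                // versions this runs as a FlushTableProcedure with per-region
                // subprocedures, which is what the pid=53/pid=54 entries in this log show.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }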
2024-11-20T22:24:32,295 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T22:24:32,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:32,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:32,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:32,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:32,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:32,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:32,316 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#178 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:32,317 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/956957548c044981b4f09d28e0e41dee is 50, key is test_row_0/B:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,327 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d20cc8986aca4e06853bc3437488a3fb_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:32,330 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d20cc8986aca4e06853bc3437488a3fb_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:32,330 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d20cc8986aca4e06853bc3437488a3fb_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:32,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
as already flushing 2024-11-20T22:24:32,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:32,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d5ca6f9f55d948179221450269267513_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141472010/Put/seqid=0 2024-11-20T22:24:32,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742045_1221 (size=13187) 2024-11-20T22:24:32,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141532382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141532383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141532384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141532387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141532389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:32,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742047_1223 (size=12454) 2024-11-20T22:24:32,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:32,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742046_1222 (size=4469) 2024-11-20T22:24:32,475 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d5ca6f9f55d948179221450269267513_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d5ca6f9f55d948179221450269267513_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:32,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e00e6cabd2524dcaa5adb2a3fe3003d2, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:32,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e00e6cabd2524dcaa5adb2a3fe3003d2 is 175, key is test_row_0/A:col10/1732141472010/Put/seqid=0 2024-11-20T22:24:32,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141532498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141532498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141532499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141532510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141532517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742048_1224 (size=31255) 2024-11-20T22:24:32,547 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=399, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e00e6cabd2524dcaa5adb2a3fe3003d2 2024-11-20T22:24:32,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/da01a25fe8664b42b3864a9c25263a0a is 50, key is test_row_0/B:col10/1732141472010/Put/seqid=0 2024-11-20T22:24:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742049_1225 (size=12301) 2024-11-20T22:24:32,619 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/da01a25fe8664b42b3864a9c25263a0a 2024-11-20T22:24:32,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2e709365ca2e4990b3ceddc0d768e4d2 is 50, key is test_row_0/C:col10/1732141472010/Put/seqid=0 2024-11-20T22:24:32,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141532715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141532722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141532723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742050_1226 (size=12301) 2024-11-20T22:24:32,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:32,743 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2e709365ca2e4990b3ceddc0d768e4d2 2024-11-20T22:24:32,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141532738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141532747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:32,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/e00e6cabd2524dcaa5adb2a3fe3003d2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2 2024-11-20T22:24:32,813 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/956957548c044981b4f09d28e0e41dee as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/956957548c044981b4f09d28e0e41dee 2024-11-20T22:24:32,825 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2, entries=150, sequenceid=399, filesize=30.5 K 2024-11-20T22:24:32,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/da01a25fe8664b42b3864a9c25263a0a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da01a25fe8664b42b3864a9c25263a0a 2024-11-20T22:24:32,839 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into 956957548c044981b4f09d28e0e41dee(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
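The repeated RegionTooBusyException warnings in this stretch are the region server rejecting writes because the region's memstore has exceeded its blocking limit (here 512.0 K, i.e. the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier) while the flush and compactions above catch up; the unusually small limit suggests the test runs with a reduced hbase.hregion.memstore.flush.size. The stock client retries these errors internally, so the loop below is only an illustration of an explicit retry-with-backoff around a single put. Row, family and qualifier names mirror the ones visible in the log; depending on client retry settings the exception may surface wrapped rather than directly, so treat this as a sketch, not the test's own code.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionRetry {
        // Retries a put a few times when the region reports "too busy",
        // backing off so the server has time to flush its memstore.
        static void putWithBackoff(Connection conn, Put put) throws Exception {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                long backoffMs = 100;
                for (int attempt = 0; ; attempt++) {
                    try {
                        table.put(put);
                        return;
                    } catch (RegionTooBusyException e) {   // assumes the exception surfaces unwrapped
                        if (attempt >= 5) throw e;         // give up eventually
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;                    // exponential backoff
                    }
                }
            }
        }

        static Put examplePut() {
            // Mirrors the keys seen in the log: row test_row_0, family A, qualifier col10.
            return new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        }
    }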
2024-11-20T22:24:32,839 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:32,839 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=12, startTime=1732141472265; duration=0sec 2024-11-20T22:24:32,839 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:32,839 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:32,839 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:32,841 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:32,842 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:32,842 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:32,842 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b30f0d86973942a4997056212afbfac8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ab5644ae44574e92b55868a9c4f5fd45, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/bff000b978914381b1a0619de0b68b58, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d84a6e0e9eb44cdba0d5118daf7623f8] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=48.8 K 2024-11-20T22:24:32,843 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b30f0d86973942a4997056212afbfac8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732141468241 2024-11-20T22:24:32,843 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ab5644ae44574e92b55868a9c4f5fd45, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141468910 2024-11-20T22:24:32,844 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bff000b978914381b1a0619de0b68b58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=363, earliestPutTs=1732141470088 2024-11-20T22:24:32,845 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d84a6e0e9eb44cdba0d5118daf7623f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732141470797 2024-11-20T22:24:32,854 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da01a25fe8664b42b3864a9c25263a0a, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T22:24:32,868 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#177 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:32,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2e709365ca2e4990b3ceddc0d768e4d2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2e709365ca2e4990b3ceddc0d768e4d2 2024-11-20T22:24:32,869 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/2f97909fab1247bb97c24db574d41adc is 175, key is test_row_0/A:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,904 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2e709365ca2e4990b3ceddc0d768e4d2, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T22:24:32,907 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for fe73e78f2490c46e0778d445404a6f5f in 612ms, sequenceid=399, compaction requested=false 2024-11-20T22:24:32,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:32,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
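The "Exploring compaction algorithm has selected ... after considering N permutations with M in ratio" lines come from ExploringCompactionPolicy, which slides a window over the candidate store files and only accepts selections whose files are "in ratio", i.e. no single file is larger than the compaction ratio times the combined size of the other files in the window. The sketch below is a deliberately simplified illustration of that ratio test and window search, not the actual HBase implementation; the 1.2 value mentioned in comments is the stock default of hbase.hstore.compaction.ratio.

    import java.util.List;

    // Simplified illustration of the "in ratio" rule used when exploring
    // candidate compaction selections. Not the real ExploringCompactionPolicy.
    final class RatioSketch {
        static boolean filesInRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                // No file may dominate the selection: it must be at most `ratio`
                // (default 1.2) times the size of everything else in the window.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        // Tries every contiguous window of at least minFiles and keeps the best
        // one (most files, then smallest total size), mimicking the
        // "considering N permutations" wording in the log.
        static List<Long> pickWindow(List<Long> sizes, int minFiles, double ratio) {
            List<Long> best = List.of();
            long bestSize = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= sizes.size(); end++) {
                    List<Long> window = sizes.subList(start, end);
                    if (!filesInRatio(window, ratio)) continue;
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    if (window.size() > best.size()
                            || (window.size() == best.size() && total < bestSize)) {
                        best = List.copyOf(window);
                        bestSize = total;
                    }
                }
            }
            return best;
        }
    }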
2024-11-20T22:24:32,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T22:24:32,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T22:24:32,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T22:24:32,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 779 msec 2024-11-20T22:24:32,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 788 msec 2024-11-20T22:24:32,943 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#182 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:32,944 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d97de9cd82454d49b1009ff07f0a9c77 is 50, key is test_row_0/C:col10/1732141470797/Put/seqid=0 2024-11-20T22:24:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742051_1227 (size=32141) 2024-11-20T22:24:32,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742052_1228 (size=13187) 2024-11-20T22:24:33,000 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/2f97909fab1247bb97c24db574d41adc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2f97909fab1247bb97c24db574d41adc 2024-11-20T22:24:33,001 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d97de9cd82454d49b1009ff07f0a9c77 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d97de9cd82454d49b1009ff07f0a9c77 2024-11-20T22:24:33,010 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into 2f97909fab1247bb97c24db574d41adc(size=31.4 K), total size for store is 61.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
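The table being hammered here (TestAcidGuarantees, column families A, B and C) belongs to an ACID-guarantee test: writers update all families of a row together while readers assert that a single Get never observes a mix of old and new values, even while the flushes and compactions logged above are in flight. The check below is only an illustration of that kind of row-atomicity invariant written against the public client API; the qualifier name is taken from the log, but the exact assertion the real test performs is an assumption.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative row-atomicity check: every family of a row should carry the
    // value from the same multi-family write. This mirrors the kind of assertion
    // an ACID-guarantees test makes; it is not the test's actual code.
    final class RowConsistencyCheck {
        static boolean rowIsConsistent(Connection conn, byte[] row) throws Exception {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Result r = table.get(new Get(row));
                byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
                byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
                byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
                // If writers always update A, B and C together, a reader must never
                // see two families disagreeing, regardless of concurrent flushes or
                // compactions like the ones logged above.
                return Bytes.equals(a, b) && Bytes.equals(b, c);
            }
        }
    }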
2024-11-20T22:24:33,012 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:33,012 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=12, startTime=1732141472264; duration=0sec 2024-11-20T22:24:33,012 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into d97de9cd82454d49b1009ff07f0a9c77(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:33,012 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:33,012 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:33,012 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:33,012 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=12, startTime=1732141472267; duration=0sec 2024-11-20T22:24:33,012 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:33,012 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:33,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T22:24:33,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:33,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:33,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:33,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:33,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:33,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:33,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:33,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112032a6ad1b0bed4d51acb37a56e6bc7fca_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:33,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742053_1229 (size=14994) 2024-11-20T22:24:33,121 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:33,140 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112032a6ad1b0bed4d51acb37a56e6bc7fca_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112032a6ad1b0bed4d51acb37a56e6bc7fca_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:33,143 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c330081d6cd2455c9ab983edd5f60c2d, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:33,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c330081d6cd2455c9ab983edd5f60c2d is 175, key is test_row_0/A:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:33,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742054_1230 (size=39949) 2024-11-20T22:24:33,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141533185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141533190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141533192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141533204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141533204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T22:24:33,255 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T22:24:33,265 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T22:24:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:33,267 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:33,267 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:33,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:33,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141533307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141533307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141533311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141533326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141533332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:33,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141533520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141533523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141533539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141533525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141533543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:33,573 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=420, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c330081d6cd2455c9ab983edd5f60c2d 2024-11-20T22:24:33,576 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:33,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:33,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/8fa7f80349634cbdb38aa38765be9f7c is 50, key is test_row_0/B:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:33,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742055_1231 (size=12301) 2024-11-20T22:24:33,739 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141533832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141533843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141533855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141533859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141533863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:33,902 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:33,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:33,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:33,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:33,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:33,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:33,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/8fa7f80349634cbdb38aa38765be9f7c 2024-11-20T22:24:34,057 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:34,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:34,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:34,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/02e4af9142aa44e2889c2be7305f8338 is 50, key is test_row_0/C:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:34,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742056_1232 (size=12301) 2024-11-20T22:24:34,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/02e4af9142aa44e2889c2be7305f8338 2024-11-20T22:24:34,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/c330081d6cd2455c9ab983edd5f60c2d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d 2024-11-20T22:24:34,219 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:34,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:34,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d, entries=200, sequenceid=420, filesize=39.0 K 2024-11-20T22:24:34,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/8fa7f80349634cbdb38aa38765be9f7c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8fa7f80349634cbdb38aa38765be9f7c 2024-11-20T22:24:34,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8fa7f80349634cbdb38aa38765be9f7c, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:24:34,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/02e4af9142aa44e2889c2be7305f8338 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/02e4af9142aa44e2889c2be7305f8338 2024-11-20T22:24:34,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/02e4af9142aa44e2889c2be7305f8338, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:24:34,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141534338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for fe73e78f2490c46e0778d445404a6f5f in 1327ms, sequenceid=420, compaction requested=true 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:34,371 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:34,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:34,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:34,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:34,374 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:34,375 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:24:34,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:34,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:34,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:34,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:34,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:34,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:34,388 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103345 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:34,388 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:34,388 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,388 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2f97909fab1247bb97c24db574d41adc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=100.9 K 2024-11-20T22:24:34,388 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,388 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2f97909fab1247bb97c24db574d41adc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d] 2024-11-20T22:24:34,389 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:34,389 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:34,389 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,389 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/956957548c044981b4f09d28e0e41dee, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da01a25fe8664b42b3864a9c25263a0a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8fa7f80349634cbdb38aa38765be9f7c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=36.9 K 2024-11-20T22:24:34,391 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f97909fab1247bb97c24db574d41adc, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732141470797 2024-11-20T22:24:34,395 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,395 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 956957548c044981b4f09d28e0e41dee, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732141470797 2024-11-20T22:24:34,395 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e00e6cabd2524dcaa5adb2a3fe3003d2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141472007 2024-11-20T22:24:34,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:34,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:34,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:34,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:34,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,403 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c330081d6cd2455c9ab983edd5f60c2d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141472365 2024-11-20T22:24:34,403 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting da01a25fe8664b42b3864a9c25263a0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141472007 2024-11-20T22:24:34,412 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fa7f80349634cbdb38aa38765be9f7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141472365 2024-11-20T22:24:34,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141534433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141534435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141534437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141534445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b12b298be8e945d193bb0ee5480b293b_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141474374/Put/seqid=0 2024-11-20T22:24:34,469 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:34,473 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#188 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:34,474 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/da9add30f9494b75a0c32d7e1c556b7a is 50, key is test_row_0/B:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:34,487 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411200555aef2b46d4e57b533f4c2108367ce_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:34,489 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411200555aef2b46d4e57b533f4c2108367ce_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:34,489 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200555aef2b46d4e57b533f4c2108367ce_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:34,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742057_1233 (size=12454) 2024-11-20T22:24:34,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141534548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141534548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141534555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:34,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141534555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:34,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:34,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742059_1235 (size=4469) 2024-11-20T22:24:34,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742058_1234 (size=13289) 2024-11-20T22:24:34,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:34,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:34,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141534759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141534759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141534759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:34,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141534775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:34,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:34,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:34,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:34,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:34,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:34,927 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:34,978 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#187 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:34,979 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/8a0330fefd1f46ceac0179fc725260b8 is 175, key is test_row_0/A:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:34,987 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b12b298be8e945d193bb0ee5480b293b_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b12b298be8e945d193bb0ee5480b293b_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:34,993 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/4ad2ae714e5d48dc93c3a5e2886dc6fd, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:34,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/4ad2ae714e5d48dc93c3a5e2886dc6fd is 175, key is test_row_0/A:col10/1732141474374/Put/seqid=0 2024-11-20T22:24:34,995 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/da9add30f9494b75a0c32d7e1c556b7a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da9add30f9494b75a0c32d7e1c556b7a 2024-11-20T22:24:35,007 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into da9add30f9494b75a0c32d7e1c556b7a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:35,007 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:35,007 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=13, startTime=1732141474371; duration=0sec 2024-11-20T22:24:35,008 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:35,008 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:35,008 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:35,009 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:35,010 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:35,010 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,010 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d97de9cd82454d49b1009ff07f0a9c77, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2e709365ca2e4990b3ceddc0d768e4d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/02e4af9142aa44e2889c2be7305f8338] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=36.9 K 2024-11-20T22:24:35,011 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d97de9cd82454d49b1009ff07f0a9c77, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732141470797 2024-11-20T22:24:35,012 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e709365ca2e4990b3ceddc0d768e4d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141472007 2024-11-20T22:24:35,013 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02e4af9142aa44e2889c2be7305f8338, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141472365 2024-11-20T22:24:35,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39471 is added to blk_1073742060_1236 (size=32243) 2024-11-20T22:24:35,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:35,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:35,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141535063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141535064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141535067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742061_1237 (size=31255) 2024-11-20T22:24:35,077 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=441, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/4ad2ae714e5d48dc93c3a5e2886dc6fd 2024-11-20T22:24:35,097 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:35,097 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/ece1468cb3584a0e9185300c42724c41 is 50, key is test_row_0/C:col10/1732141473042/Put/seqid=0 2024-11-20T22:24:35,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141535094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/b61705d6040b4278b6625bfa89edbdf6 is 50, key is test_row_0/B:col10/1732141474374/Put/seqid=0 2024-11-20T22:24:35,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742062_1238 (size=13289) 2024-11-20T22:24:35,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742063_1239 (size=12301) 2024-11-20T22:24:35,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/b61705d6040b4278b6625bfa89edbdf6 2024-11-20T22:24:35,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:35,181 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/a93c92bcb98444bf929107d85e38eff9 is 50, key is test_row_0/C:col10/1732141474374/Put/seqid=0 2024-11-20T22:24:35,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742064_1240 (size=12301) 2024-11-20T22:24:35,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:35,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141535366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:35,435 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/8a0330fefd1f46ceac0179fc725260b8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8a0330fefd1f46ceac0179fc725260b8 2024-11-20T22:24:35,449 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into 8a0330fefd1f46ceac0179fc725260b8(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 1sec to execute. 2024-11-20T22:24:35,449 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:35,449 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=13, startTime=1732141474371; duration=1sec 2024-11-20T22:24:35,449 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:35,449 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:35,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:35,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:35,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:35,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:35,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,558 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/ece1468cb3584a0e9185300c42724c41 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ece1468cb3584a0e9185300c42724c41 2024-11-20T22:24:35,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141535571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141535573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141535574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,576 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into ece1468cb3584a0e9185300c42724c41(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:35,576 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:35,576 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=13, startTime=1732141474371; duration=0sec 2024-11-20T22:24:35,576 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:35,576 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:35,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141535608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/a93c92bcb98444bf929107d85e38eff9 2024-11-20T22:24:35,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/4ad2ae714e5d48dc93c3a5e2886dc6fd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd 2024-11-20T22:24:35,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd, entries=150, sequenceid=441, filesize=30.5 K 2024-11-20T22:24:35,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/b61705d6040b4278b6625bfa89edbdf6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b61705d6040b4278b6625bfa89edbdf6 2024-11-20T22:24:35,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b61705d6040b4278b6625bfa89edbdf6, entries=150, sequenceid=441, filesize=12.0 K 2024-11-20T22:24:35,667 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/a93c92bcb98444bf929107d85e38eff9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a93c92bcb98444bf929107d85e38eff9 2024-11-20T22:24:35,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:35,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:35,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a93c92bcb98444bf929107d85e38eff9, entries=150, sequenceid=441, filesize=12.0 K 2024-11-20T22:24:35,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for fe73e78f2490c46e0778d445404a6f5f in 1314ms, sequenceid=441, compaction requested=false 2024-11-20T22:24:35,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:35,822 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:35,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T22:24:35,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:35,831 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:35,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:35,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:35,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:35,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:35,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e4e71e9fb794fe5b30f3d4e980a7840_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141474437/Put/seqid=0 2024-11-20T22:24:35,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742065_1241 (size=12454) 2024-11-20T22:24:36,312 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:36,327 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e4e71e9fb794fe5b30f3d4e980a7840_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e4e71e9fb794fe5b30f3d4e980a7840_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:36,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/7e1ce369a103471a973a1c7adfaba4fa, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:36,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/7e1ce369a103471a973a1c7adfaba4fa is 175, key is test_row_0/A:col10/1732141474437/Put/seqid=0 2024-11-20T22:24:36,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742066_1242 (size=31255) 2024-11-20T22:24:36,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:36,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141536627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141536628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141536629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141536633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141536739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141536744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141536748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141536748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,767 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=459, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/7e1ce369a103471a973a1c7adfaba4fa 2024-11-20T22:24:36,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/08e7048749c3416b89058850ceeea1c7 is 50, key is test_row_0/B:col10/1732141474437/Put/seqid=0 2024-11-20T22:24:36,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742067_1243 (size=12301) 2024-11-20T22:24:36,848 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=459 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/08e7048749c3416b89058850ceeea1c7 2024-11-20T22:24:36,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/7b7b7bb219af49a8823f73d3e0ab763a is 50, key is test_row_0/C:col10/1732141474437/Put/seqid=0 2024-11-20T22:24:36,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742068_1244 (size=12301) 2024-11-20T22:24:36,903 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=459 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/7b7b7bb219af49a8823f73d3e0ab763a 2024-11-20T22:24:36,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/7e1ce369a103471a973a1c7adfaba4fa as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa 2024-11-20T22:24:36,931 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa, entries=150, sequenceid=459, filesize=30.5 K 2024-11-20T22:24:36,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/08e7048749c3416b89058850ceeea1c7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/08e7048749c3416b89058850ceeea1c7 2024-11-20T22:24:36,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141536945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141536955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141536956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141536958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:36,983 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/08e7048749c3416b89058850ceeea1c7, entries=150, sequenceid=459, filesize=12.0 K 2024-11-20T22:24:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/7b7b7bb219af49a8823f73d3e0ab763a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/7b7b7bb219af49a8823f73d3e0ab763a 2024-11-20T22:24:37,018 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/7b7b7bb219af49a8823f73d3e0ab763a, entries=150, sequenceid=459, filesize=12.0 K 2024-11-20T22:24:37,019 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for fe73e78f2490c46e0778d445404a6f5f in 1188ms, sequenceid=459, compaction requested=true 2024-11-20T22:24:37,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:37,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:37,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T22:24:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T22:24:37,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T22:24:37,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7550 sec 2024-11-20T22:24:37,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 3.7610 sec 2024-11-20T22:24:37,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:37,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:37,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:37,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:37,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:37,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:37,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:37,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:37,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203aacf6cf221546b4908bbd3343d94744_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:37,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141537286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141537290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141537290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141537291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742069_1245 (size=14994) 2024-11-20T22:24:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T22:24:37,382 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T22:24:37,384 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T22:24:37,386 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:37,387 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:37,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:37,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48298 deadline: 1732141537383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:37,391 DEBUG [Thread-594 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., hostname=6365a1e51efd,46811,1732141422048, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:37,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141537399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141537399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141537400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141537400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:37,542 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:37,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:37,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:37,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:37,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141537611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141537615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141537615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141537611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,626 DEBUG [Thread-597 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x466910ad to 127.0.0.1:51916 2024-11-20T22:24:37,626 DEBUG [Thread-597 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:37,628 DEBUG [Thread-603 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37126f6a to 127.0.0.1:51916 2024-11-20T22:24:37,629 DEBUG [Thread-603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:37,629 DEBUG [Thread-599 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d20fc0f to 127.0.0.1:51916 2024-11-20T22:24:37,629 DEBUG [Thread-599 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:37,631 DEBUG [Thread-601 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26ebd463 to 127.0.0.1:51916 2024-11-20T22:24:37,631 DEBUG [Thread-601 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:37,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:37,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:37,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:37,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
as already flushing 2024-11-20T22:24:37,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:37,703 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,745 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:37,758 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203aacf6cf221546b4908bbd3343d94744_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203aacf6cf221546b4908bbd3343d94744_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:37,760 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/3203119dbb3046619d85f506b1b63078, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:37,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/3203119dbb3046619d85f506b1b63078 is 175, key is test_row_0/A:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:37,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742070_1246 (size=39949) 2024-11-20T22:24:37,837 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=481, 
memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/3203119dbb3046619d85f506b1b63078 2024-11-20T22:24:37,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/8e2419fb7f2f46cdb0b8082090a15711 is 50, key is test_row_0/B:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:37,859 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:37,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:37,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:37,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:37,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:37,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742071_1247 (size=12301) 2024-11-20T22:24:37,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48326 deadline: 1732141537919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48304 deadline: 1732141537919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48334 deadline: 1732141537921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48320 deadline: 1732141537923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:38,019 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,179 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:38,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/8e2419fb7f2f46cdb0b8082090a15711 2024-11-20T22:24:38,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4eecb9f611154841a1dec7ef3fd7ee25 is 50, key is test_row_0/C:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:38,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742072_1248 (size=12301) 2024-11-20T22:24:38,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4eecb9f611154841a1dec7ef3fd7ee25 2024-11-20T22:24:38,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:38,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/3203119dbb3046619d85f506b1b63078 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078 2024-11-20T22:24:38,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078, entries=200, sequenceid=481, filesize=39.0 K 2024-11-20T22:24:38,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/8e2419fb7f2f46cdb0b8082090a15711 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8e2419fb7f2f46cdb0b8082090a15711 2024-11-20T22:24:38,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8e2419fb7f2f46cdb0b8082090a15711, entries=150, sequenceid=481, filesize=12.0 K 2024-11-20T22:24:38,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/4eecb9f611154841a1dec7ef3fd7ee25 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4eecb9f611154841a1dec7ef3fd7ee25 2024-11-20T22:24:38,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4eecb9f611154841a1dec7ef3fd7ee25, entries=150, sequenceid=481, filesize=12.0 K 2024-11-20T22:24:38,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for fe73e78f2490c46e0778d445404a6f5f in 1116ms, sequenceid=481, compaction requested=true 2024-11-20T22:24:38,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:38,374 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:38,374 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe73e78f2490c46e0778d445404a6f5f:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:38,383 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:38,383 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/B is initiating minor compaction (all files) 2024-11-20T22:24:38,383 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/B in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:38,383 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da9add30f9494b75a0c32d7e1c556b7a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b61705d6040b4278b6625bfa89edbdf6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/08e7048749c3416b89058850ceeea1c7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8e2419fb7f2f46cdb0b8082090a15711] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=49.0 K 2024-11-20T22:24:38,384 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:38,384 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/A is initiating minor compaction (all files) 2024-11-20T22:24:38,384 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/A in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,384 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8a0330fefd1f46ceac0179fc725260b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=131.5 K 2024-11-20T22:24:38,384 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,384 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8a0330fefd1f46ceac0179fc725260b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078] 2024-11-20T22:24:38,384 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting da9add30f9494b75a0c32d7e1c556b7a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141472365 2024-11-20T22:24:38,385 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a0330fefd1f46ceac0179fc725260b8, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141472365 2024-11-20T22:24:38,385 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b61705d6040b4278b6625bfa89edbdf6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732141473190 2024-11-20T22:24:38,385 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ad2ae714e5d48dc93c3a5e2886dc6fd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732141473190 2024-11-20T22:24:38,385 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 08e7048749c3416b89058850ceeea1c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=459, earliestPutTs=1732141474433 2024-11-20T22:24:38,386 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e1ce369a103471a973a1c7adfaba4fa, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=459, earliestPutTs=1732141474433 2024-11-20T22:24:38,386 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e2419fb7f2f46cdb0b8082090a15711, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732141476626 2024-11-20T22:24:38,386 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3203119dbb3046619d85f506b1b63078, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732141476626 2024-11-20T22:24:38,409 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#B#compaction#198 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:38,410 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/5f87397878714589af66bf8bdbaa59a3 is 50, key is test_row_0/B:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:38,423 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:38,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:38,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:24:38,432 DEBUG [Thread-592 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bb3648 to 127.0.0.1:51916 2024-11-20T22:24:38,432 DEBUG [Thread-592 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:38,439 DEBUG [Thread-590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65069e2f to 127.0.0.1:51916 2024-11-20T22:24:38,439 DEBUG [Thread-588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1147c8c4 to 127.0.0.1:51916 2024-11-20T22:24:38,439 DEBUG [Thread-588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:38,439 DEBUG [Thread-590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:38,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:38,447 DEBUG [Thread-586 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x299dc956 to 127.0.0.1:51916 2024-11-20T22:24:38,448 DEBUG [Thread-586 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:38,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:38,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:38,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:38,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:38,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:38,454 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120884d4ede5df74815bbf57fb8340dccd5_fe73e78f2490c46e0778d445404a6f5f store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:38,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742073_1249 (size=13425) 2024-11-20T22:24:38,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112002f3546568274db7b975dd9eb6b44bfb_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_0/A:col10/1732141478430/Put/seqid=0 2024-11-20T22:24:38,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742074_1250 (size=12454) 2024-11-20T22:24:38,535 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120884d4ede5df74815bbf57fb8340dccd5_fe73e78f2490c46e0778d445404a6f5f, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:38,535 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120884d4ede5df74815bbf57fb8340dccd5_fe73e78f2490c46e0778d445404a6f5f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:38,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742075_1251 (size=4469) 2024-11-20T22:24:38,567 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#A#compaction#199 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:38,568 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/b760dd3a261d4b2ab20dfd57fc11d70e is 175, key is test_row_0/A:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:38,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742076_1252 (size=32379) 2024-11-20T22:24:38,641 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:38,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,905 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/5f87397878714589af66bf8bdbaa59a3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/5f87397878714589af66bf8bdbaa59a3 2024-11-20T22:24:38,932 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/B of fe73e78f2490c46e0778d445404a6f5f into 5f87397878714589af66bf8bdbaa59a3(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:38,932 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:38,932 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/B, priority=12, startTime=1732141478374; duration=0sec 2024-11-20T22:24:38,932 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:38,932 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:B 2024-11-20T22:24:38,932 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:38,933 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:38,937 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:38,937 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): fe73e78f2490c46e0778d445404a6f5f/C is initiating minor compaction (all files) 2024-11-20T22:24:38,937 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe73e78f2490c46e0778d445404a6f5f/C in TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:38,937 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ece1468cb3584a0e9185300c42724c41, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a93c92bcb98444bf929107d85e38eff9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/7b7b7bb219af49a8823f73d3e0ab763a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4eecb9f611154841a1dec7ef3fd7ee25] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp, totalSize=49.0 K 2024-11-20T22:24:38,938 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ece1468cb3584a0e9185300c42724c41, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141472365 2024-11-20T22:24:38,938 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a93c92bcb98444bf929107d85e38eff9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732141473190 2024-11-20T22:24:38,941 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b7b7bb219af49a8823f73d3e0ab763a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=459, earliestPutTs=1732141474433 2024-11-20T22:24:38,943 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112002f3546568274db7b975dd9eb6b44bfb_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112002f3546568274db7b975dd9eb6b44bfb_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:38,943 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4eecb9f611154841a1dec7ef3fd7ee25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732141476626 2024-11-20T22:24:38,945 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/2662833d38d74531b50db2dc370f83a2, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:38,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/2662833d38d74531b50db2dc370f83a2 is 175, key is test_row_0/A:col10/1732141478430/Put/seqid=0 2024-11-20T22:24:38,950 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
6365a1e51efd,46811,1732141422048 2024-11-20T22:24:38,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:38,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:38,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:38,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:38,972 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe73e78f2490c46e0778d445404a6f5f#C#compaction#201 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:38,972 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d292a049752c44e6b33c00fb35e980d6 is 50, key is test_row_0/C:col10/1732141476626/Put/seqid=0 2024-11-20T22:24:38,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742077_1253 (size=31255) 2024-11-20T22:24:39,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742078_1254 (size=13425) 2024-11-20T22:24:39,017 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/d292a049752c44e6b33c00fb35e980d6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d292a049752c44e6b33c00fb35e980d6 2024-11-20T22:24:39,023 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/b760dd3a261d4b2ab20dfd57fc11d70e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b760dd3a261d4b2ab20dfd57fc11d70e 2024-11-20T22:24:39,041 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/C of fe73e78f2490c46e0778d445404a6f5f into d292a049752c44e6b33c00fb35e980d6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:39,042 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:39,042 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/C, priority=12, startTime=1732141478374; duration=0sec 2024-11-20T22:24:39,042 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:39,042 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:C 2024-11-20T22:24:39,042 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe73e78f2490c46e0778d445404a6f5f/A of fe73e78f2490c46e0778d445404a6f5f into b760dd3a261d4b2ab20dfd57fc11d70e(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:39,042 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:39,042 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f., storeName=fe73e78f2490c46e0778d445404a6f5f/A, priority=12, startTime=1732141478373; duration=0sec 2024-11-20T22:24:39,042 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:39,042 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe73e78f2490c46e0778d445404a6f5f:A 2024-11-20T22:24:39,115 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:39,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:39,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:39,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,160 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:24:39,271 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:39,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:39,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:39,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,376 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=500, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/2662833d38d74531b50db2dc370f83a2 2024-11-20T22:24:39,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2dce8fdece5f417cbe39ef73afa4fb1c is 50, key is test_row_0/B:col10/1732141478430/Put/seqid=0 2024-11-20T22:24:39,433 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:39,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:39,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:39,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742079_1255 (size=12301) 2024-11-20T22:24:39,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2dce8fdece5f417cbe39ef73afa4fb1c 2024-11-20T22:24:39,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2939ef49bc3944adbfc3226adad00096 is 50, key is test_row_0/C:col10/1732141478430/Put/seqid=0 2024-11-20T22:24:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:39,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742080_1256 (size=12301) 2024-11-20T22:24:39,594 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:39,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:39,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:39,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,755 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:39,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:39,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:39,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:39,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T22:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. as already flushing 2024-11-20T22:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:39,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:39,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2939ef49bc3944adbfc3226adad00096 2024-11-20T22:24:39,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/2662833d38d74531b50db2dc370f83a2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2662833d38d74531b50db2dc370f83a2 2024-11-20T22:24:39,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2662833d38d74531b50db2dc370f83a2, entries=150, sequenceid=500, filesize=30.5 K 2024-11-20T22:24:39,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2dce8fdece5f417cbe39ef73afa4fb1c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2dce8fdece5f417cbe39ef73afa4fb1c 2024-11-20T22:24:39,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2dce8fdece5f417cbe39ef73afa4fb1c, entries=150, sequenceid=500, filesize=12.0 K 2024-11-20T22:24:39,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/2939ef49bc3944adbfc3226adad00096 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2939ef49bc3944adbfc3226adad00096 2024-11-20T22:24:40,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2939ef49bc3944adbfc3226adad00096, entries=150, sequenceid=500, filesize=12.0 K 2024-11-20T22:24:40,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for fe73e78f2490c46e0778d445404a6f5f in 1603ms, sequenceid=500, compaction requested=false 2024-11-20T22:24:40,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:40,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:40,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 
2024-11-20T22:24:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T22:24:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T22:24:40,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T22:24:40,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6970 sec 2024-11-20T22:24:40,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.7020 sec 2024-11-20T22:24:41,400 DEBUG [Thread-594 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5fc07fbc to 127.0.0.1:51916 2024-11-20T22:24:41,400 DEBUG [Thread-594 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:41,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T22:24:41,501 INFO [Thread-596 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3050 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2940 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1317 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3950 rows 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1310 2024-11-20T22:24:41,501 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3928 rows 2024-11-20T22:24:41,502 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:24:41,502 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f5f7848 to 127.0.0.1:51916 2024-11-20T22:24:41,502 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:24:41,512 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:24:41,512 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:24:41,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:41,515 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141481515"}]},"ts":"1732141481515"} 2024-11-20T22:24:41,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:41,516 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:24:41,526 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:24:41,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:24:41,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, UNASSIGN}] 2024-11-20T22:24:41,528 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, UNASSIGN 2024-11-20T22:24:41,529 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:41,530 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:24:41,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:24:41,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:41,681 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:41,686 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing fe73e78f2490c46e0778d445404a6f5f, disabling compactions & flushes 2024-11-20T22:24:41,686 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. after waiting 0 ms 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:41,686 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing fe73e78f2490c46e0778d445404a6f5f 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=A 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=B 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe73e78f2490c46e0778d445404a6f5f, store=C 2024-11-20T22:24:41,686 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:41,693 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200eabb86c27474e4fa2600601935d59f2_fe73e78f2490c46e0778d445404a6f5f is 50, key is test_row_1/A:col10/1732141481398/Put/seqid=0 2024-11-20T22:24:41,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742081_1257 (size=7374) 2024-11-20T22:24:41,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:42,112 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:42,117 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200eabb86c27474e4fa2600601935d59f2_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200eabb86c27474e4fa2600601935d59f2_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:42,118 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/bdc0628c26cb4bb6bb85a24979f69ce7, store: [table=TestAcidGuarantees family=A region=fe73e78f2490c46e0778d445404a6f5f] 2024-11-20T22:24:42,119 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/bdc0628c26cb4bb6bb85a24979f69ce7 is 175, key is test_row_1/A:col10/1732141481398/Put/seqid=0 2024-11-20T22:24:42,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:42,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742082_1258 (size=13865) 2024-11-20T22:24:42,126 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=508, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/bdc0628c26cb4bb6bb85a24979f69ce7 2024-11-20T22:24:42,139 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2ea038d6a2b44ffbb65803f6d6cf92e0 is 50, key is test_row_1/B:col10/1732141481398/Put/seqid=0 2024-11-20T22:24:42,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742083_1259 (size=7415) 2024-11-20T22:24:42,186 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2ea038d6a2b44ffbb65803f6d6cf92e0 2024-11-20T22:24:42,196 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/5d090d65b2ac4f68a01e7d86649fb284 is 50, key is test_row_1/C:col10/1732141481398/Put/seqid=0 2024-11-20T22:24:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742084_1260 (size=7415) 2024-11-20T22:24:42,234 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/5d090d65b2ac4f68a01e7d86649fb284 2024-11-20T22:24:42,243 DEBUG 
[RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/A/bdc0628c26cb4bb6bb85a24979f69ce7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bdc0628c26cb4bb6bb85a24979f69ce7 2024-11-20T22:24:42,250 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bdc0628c26cb4bb6bb85a24979f69ce7, entries=50, sequenceid=508, filesize=13.5 K 2024-11-20T22:24:42,251 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/B/2ea038d6a2b44ffbb65803f6d6cf92e0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2ea038d6a2b44ffbb65803f6d6cf92e0 2024-11-20T22:24:42,256 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2ea038d6a2b44ffbb65803f6d6cf92e0, entries=50, sequenceid=508, filesize=7.2 K 2024-11-20T22:24:42,258 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/.tmp/C/5d090d65b2ac4f68a01e7d86649fb284 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/5d090d65b2ac4f68a01e7d86649fb284 2024-11-20T22:24:42,272 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/5d090d65b2ac4f68a01e7d86649fb284, entries=50, sequenceid=508, filesize=7.2 K 2024-11-20T22:24:42,275 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for fe73e78f2490c46e0778d445404a6f5f in 588ms, sequenceid=508, compaction requested=true 2024-11-20T22:24:42,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/06380b6a8c76481db56bfce25571d2a6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/30dea8e3df774d2397da742c12b8342b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/fb854acc94f54ff5adad14e786bc8e68, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bd194e5feef74f218964f596d1b2e70a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/da726a3c9bdb4d2ea009521d6e8a02a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/9d986dc846de45fab2f245c0843b3419, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b47fcc50e0af46e8b29d7b53d7e64f99, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2f97909fab1247bb97c24db574d41adc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8a0330fefd1f46ceac0179fc725260b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078] to archive 2024-11-20T22:24:42,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:24:42,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/d89b3fe52411476a8255c539f4e79bf9 2024-11-20T22:24:42,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/923977962c2540c0949aa05c372878d4 2024-11-20T22:24:42,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/86ed06785e394c1f96f8c6506cbc02ec 2024-11-20T22:24:42,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/06380b6a8c76481db56bfce25571d2a6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/06380b6a8c76481db56bfce25571d2a6 2024-11-20T22:24:42,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/5e1506ac8a864defb15976dc7ceab9f6 2024-11-20T22:24:42,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/30dea8e3df774d2397da742c12b8342b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/30dea8e3df774d2397da742c12b8342b 2024-11-20T22:24:42,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/61124b6f9fd749dba1d22dc9f04294e2 2024-11-20T22:24:42,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ea45d439652d46ada94d6f828daa4a89 2024-11-20T22:24:42,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/38a318eee979431b89723b443d2a4f0a 2024-11-20T22:24:42,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/fb854acc94f54ff5adad14e786bc8e68 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/fb854acc94f54ff5adad14e786bc8e68 2024-11-20T22:24:42,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/dd0d598132e547c6a28d0b1dfdd24003 2024-11-20T22:24:42,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bd194e5feef74f218964f596d1b2e70a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bd194e5feef74f218964f596d1b2e70a 2024-11-20T22:24:42,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c19c4893ba984affa363a521a2cd1a7a 2024-11-20T22:24:42,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8b693e0b5ff44a0095803714f4c9ba15 2024-11-20T22:24:42,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/cc0f0c12ae744baba81074f8fe066bf7 2024-11-20T22:24:42,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/da726a3c9bdb4d2ea009521d6e8a02a0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/da726a3c9bdb4d2ea009521d6e8a02a0 2024-11-20T22:24:42,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/42266aab2bb24eb88c6f958c75db7eac 2024-11-20T22:24:42,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/9d986dc846de45fab2f245c0843b3419 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/9d986dc846de45fab2f245c0843b3419 2024-11-20T22:24:42,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/ed6a8de06576429e88f2119694cf553c 2024-11-20T22:24:42,353 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c96ea94238a04a7988ff379747be9d52 2024-11-20T22:24:42,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/12f4b4bde5424c74bbe511eb86b066fd 2024-11-20T22:24:42,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/54149b9697094dd7865135b38120d7e4 2024-11-20T22:24:42,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b47fcc50e0af46e8b29d7b53d7e64f99 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b47fcc50e0af46e8b29d7b53d7e64f99 2024-11-20T22:24:42,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e647b5ee89b94006aab8a90883f98527 2024-11-20T22:24:42,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/120a5fa1252845138001ef8f877b2bcf 2024-11-20T22:24:42,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/0c29799ef01a447f8562ca822ac10103 2024-11-20T22:24:42,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2f97909fab1247bb97c24db574d41adc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2f97909fab1247bb97c24db574d41adc 2024-11-20T22:24:42,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/e00e6cabd2524dcaa5adb2a3fe3003d2 2024-11-20T22:24:42,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/c330081d6cd2455c9ab983edd5f60c2d 2024-11-20T22:24:42,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8a0330fefd1f46ceac0179fc725260b8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/8a0330fefd1f46ceac0179fc725260b8 2024-11-20T22:24:42,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/4ad2ae714e5d48dc93c3a5e2886dc6fd 2024-11-20T22:24:42,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/7e1ce369a103471a973a1c7adfaba4fa 2024-11-20T22:24:42,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/3203119dbb3046619d85f506b1b63078 2024-11-20T22:24:42,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/7b77c8a01986494a933af2ed266ddda8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/652f70bf8a00436697b250e200585750, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/20475f88f6cf4ce18c52c22125174158, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/3b21c933398f4fd984680db51f31c54e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/92b7c9a25c3b4f47b0c25bf4180a0028, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/e476556b8f8d4ca9a52af901c4042204, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d96be29aaf904508965a538404b59285, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d0cb28dce7f245f5a87fa0f362846475, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b4d62d12ebbe4ad8a9255f6d04d08695, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f4cde6257eec4bc6955b6ebb1d7f09ed, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/0bff6b768a4b4744b6b3c34f5b5a52cb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/fc178e457f02414d814a0049f6bb1e12, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f758a7c147134fc888de6e35922ceac5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1098c276946b40828185da18c4792870, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/563cdbf6b66c484aa5bfd412bf23c8a2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2b1289462086405f85d10f4e8a8e962d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1919a0ba08d24a438dcdd93e1869cb77, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/ac9d73a93dac4938a585e38fa639f277, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4f08f16d5b4c4fb5be3c7b921d445f03, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4c9b54ca335748299f7223b9f74c82ff, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4aa5e8015d724c689a1a3cbc40c9a0f3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/72e149497a054a9eb480373af3bd301f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/876aae8822834076b33497cad042a771, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/77f4af9e275545b395f2636064a367fc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/68bac55bc5ae4e5ea762b1e37ceaef1d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/956957548c044981b4f09d28e0e41dee, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f3733cd4927c4303b5a6b68a693f7f17, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da01a25fe8664b42b3864a9c25263a0a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da9add30f9494b75a0c32d7e1c556b7a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8fa7f80349634cbdb38aa38765be9f7c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b61705d6040b4278b6625bfa89edbdf6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/08e7048749c3416b89058850ceeea1c7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8e2419fb7f2f46cdb0b8082090a15711] to archive 2024-11-20T22:24:42,466 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:24:42,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/7b77c8a01986494a933af2ed266ddda8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/7b77c8a01986494a933af2ed266ddda8 2024-11-20T22:24:42,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/652f70bf8a00436697b250e200585750 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/652f70bf8a00436697b250e200585750 2024-11-20T22:24:42,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/20475f88f6cf4ce18c52c22125174158 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/20475f88f6cf4ce18c52c22125174158 2024-11-20T22:24:42,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/3b21c933398f4fd984680db51f31c54e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/3b21c933398f4fd984680db51f31c54e 2024-11-20T22:24:42,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/92b7c9a25c3b4f47b0c25bf4180a0028 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/92b7c9a25c3b4f47b0c25bf4180a0028 2024-11-20T22:24:42,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/e476556b8f8d4ca9a52af901c4042204 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/e476556b8f8d4ca9a52af901c4042204 2024-11-20T22:24:42,484 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d96be29aaf904508965a538404b59285 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d96be29aaf904508965a538404b59285 2024-11-20T22:24:42,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d0cb28dce7f245f5a87fa0f362846475 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/d0cb28dce7f245f5a87fa0f362846475 2024-11-20T22:24:42,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b4d62d12ebbe4ad8a9255f6d04d08695 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b4d62d12ebbe4ad8a9255f6d04d08695 2024-11-20T22:24:42,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f4cde6257eec4bc6955b6ebb1d7f09ed to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f4cde6257eec4bc6955b6ebb1d7f09ed 2024-11-20T22:24:42,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/0bff6b768a4b4744b6b3c34f5b5a52cb to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/0bff6b768a4b4744b6b3c34f5b5a52cb 2024-11-20T22:24:42,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/fc178e457f02414d814a0049f6bb1e12 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/fc178e457f02414d814a0049f6bb1e12 2024-11-20T22:24:42,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f758a7c147134fc888de6e35922ceac5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f758a7c147134fc888de6e35922ceac5 2024-11-20T22:24:42,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1098c276946b40828185da18c4792870 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1098c276946b40828185da18c4792870 2024-11-20T22:24:42,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/563cdbf6b66c484aa5bfd412bf23c8a2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/563cdbf6b66c484aa5bfd412bf23c8a2 2024-11-20T22:24:42,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2b1289462086405f85d10f4e8a8e962d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2b1289462086405f85d10f4e8a8e962d 2024-11-20T22:24:42,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1919a0ba08d24a438dcdd93e1869cb77 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/1919a0ba08d24a438dcdd93e1869cb77 2024-11-20T22:24:42,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/ac9d73a93dac4938a585e38fa639f277 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/ac9d73a93dac4938a585e38fa639f277 2024-11-20T22:24:42,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4f08f16d5b4c4fb5be3c7b921d445f03 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4f08f16d5b4c4fb5be3c7b921d445f03 2024-11-20T22:24:42,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4c9b54ca335748299f7223b9f74c82ff to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4c9b54ca335748299f7223b9f74c82ff 2024-11-20T22:24:42,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4aa5e8015d724c689a1a3cbc40c9a0f3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/4aa5e8015d724c689a1a3cbc40c9a0f3 2024-11-20T22:24:42,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/72e149497a054a9eb480373af3bd301f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/72e149497a054a9eb480373af3bd301f 2024-11-20T22:24:42,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/876aae8822834076b33497cad042a771 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/876aae8822834076b33497cad042a771 2024-11-20T22:24:42,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/77f4af9e275545b395f2636064a367fc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/77f4af9e275545b395f2636064a367fc 2024-11-20T22:24:42,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/68bac55bc5ae4e5ea762b1e37ceaef1d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/68bac55bc5ae4e5ea762b1e37ceaef1d 2024-11-20T22:24:42,611 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/956957548c044981b4f09d28e0e41dee to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/956957548c044981b4f09d28e0e41dee 2024-11-20T22:24:42,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f3733cd4927c4303b5a6b68a693f7f17 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/f3733cd4927c4303b5a6b68a693f7f17 2024-11-20T22:24:42,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da01a25fe8664b42b3864a9c25263a0a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da01a25fe8664b42b3864a9c25263a0a 2024-11-20T22:24:42,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da9add30f9494b75a0c32d7e1c556b7a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/da9add30f9494b75a0c32d7e1c556b7a 2024-11-20T22:24:42,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8fa7f80349634cbdb38aa38765be9f7c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8fa7f80349634cbdb38aa38765be9f7c 2024-11-20T22:24:42,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b61705d6040b4278b6625bfa89edbdf6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/b61705d6040b4278b6625bfa89edbdf6 2024-11-20T22:24:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:42,621 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/08e7048749c3416b89058850ceeea1c7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/08e7048749c3416b89058850ceeea1c7 2024-11-20T22:24:42,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8e2419fb7f2f46cdb0b8082090a15711 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/8e2419fb7f2f46cdb0b8082090a15711 2024-11-20T22:24:42,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/90460e659e52439cb9dfcbd8b514f49d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/9cf902f7b93e4c29a98ea5e23d8dc644, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d1e5f8de04814c2180c7b73e586adc11, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4d23665fcced425ca69cf1f067af60c0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/30140b117dda46e6a46b6a94ded72124, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f026d1ace4cb421b9e9b7280a7a2e926, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b7b2c1e782414d298d9327278b29c2bb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/48052de9105b4584af5783eb1aad3601, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a5dc15cbb1e0424aa327015a12048d2c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/853917c0221b4ae18c8463f4a6587ddc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/00ba5c88e8cb48dc9a58d8b0b998809c, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/79828ee58afc4eb8a68179aba6f1e4b3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f36c3898836c4f8a963a16e4ed444b86, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4bf719aa074347e1a05b5f5af2170a6f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d3ec7a086a0a4b968f11911041f7254c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/62d4b8245cda4510a9086d1f260c2768, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/1343783d310a456f8e3ce79fcdaed03e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/983911b122ae4ce1bcbc1b3f09241f39, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4e2c4507b0254407ba6f6fa2006a0245, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/304a3537093740e6b208e3e4b1fb44f1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b30f0d86973942a4997056212afbfac8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/e1b2744c66504b7784bd89ea7e39c663, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ab5644ae44574e92b55868a9c4f5fd45, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/bff000b978914381b1a0619de0b68b58, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d97de9cd82454d49b1009ff07f0a9c77, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d84a6e0e9eb44cdba0d5118daf7623f8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2e709365ca2e4990b3ceddc0d768e4d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ece1468cb3584a0e9185300c42724c41, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/02e4af9142aa44e2889c2be7305f8338, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a93c92bcb98444bf929107d85e38eff9, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/7b7b7bb219af49a8823f73d3e0ab763a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4eecb9f611154841a1dec7ef3fd7ee25] to archive 2024-11-20T22:24:42,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:24:42,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/90460e659e52439cb9dfcbd8b514f49d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/90460e659e52439cb9dfcbd8b514f49d 2024-11-20T22:24:42,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/9cf902f7b93e4c29a98ea5e23d8dc644 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/9cf902f7b93e4c29a98ea5e23d8dc644 2024-11-20T22:24:42,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d1e5f8de04814c2180c7b73e586adc11 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d1e5f8de04814c2180c7b73e586adc11 2024-11-20T22:24:42,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4d23665fcced425ca69cf1f067af60c0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4d23665fcced425ca69cf1f067af60c0 2024-11-20T22:24:42,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/30140b117dda46e6a46b6a94ded72124 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/30140b117dda46e6a46b6a94ded72124 2024-11-20T22:24:42,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f026d1ace4cb421b9e9b7280a7a2e926 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f026d1ace4cb421b9e9b7280a7a2e926 2024-11-20T22:24:42,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2ab1d89f9fbe40bd8b7b2dd7fd913c0e 2024-11-20T22:24:42,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b7b2c1e782414d298d9327278b29c2bb to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b7b2c1e782414d298d9327278b29c2bb 2024-11-20T22:24:42,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/48052de9105b4584af5783eb1aad3601 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/48052de9105b4584af5783eb1aad3601 2024-11-20T22:24:42,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a5dc15cbb1e0424aa327015a12048d2c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a5dc15cbb1e0424aa327015a12048d2c 2024-11-20T22:24:42,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/853917c0221b4ae18c8463f4a6587ddc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/853917c0221b4ae18c8463f4a6587ddc 2024-11-20T22:24:42,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/00ba5c88e8cb48dc9a58d8b0b998809c to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/00ba5c88e8cb48dc9a58d8b0b998809c 2024-11-20T22:24:42,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/79828ee58afc4eb8a68179aba6f1e4b3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/79828ee58afc4eb8a68179aba6f1e4b3 2024-11-20T22:24:42,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f36c3898836c4f8a963a16e4ed444b86 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/f36c3898836c4f8a963a16e4ed444b86 2024-11-20T22:24:42,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4bf719aa074347e1a05b5f5af2170a6f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4bf719aa074347e1a05b5f5af2170a6f 2024-11-20T22:24:42,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d3ec7a086a0a4b968f11911041f7254c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d3ec7a086a0a4b968f11911041f7254c 2024-11-20T22:24:42,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/62d4b8245cda4510a9086d1f260c2768 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/62d4b8245cda4510a9086d1f260c2768 2024-11-20T22:24:42,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/1343783d310a456f8e3ce79fcdaed03e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/1343783d310a456f8e3ce79fcdaed03e 2024-11-20T22:24:42,721 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/983911b122ae4ce1bcbc1b3f09241f39 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/983911b122ae4ce1bcbc1b3f09241f39 2024-11-20T22:24:42,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4e2c4507b0254407ba6f6fa2006a0245 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4e2c4507b0254407ba6f6fa2006a0245 2024-11-20T22:24:42,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/304a3537093740e6b208e3e4b1fb44f1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/304a3537093740e6b208e3e4b1fb44f1 2024-11-20T22:24:42,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b30f0d86973942a4997056212afbfac8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/b30f0d86973942a4997056212afbfac8 2024-11-20T22:24:42,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/e1b2744c66504b7784bd89ea7e39c663 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/e1b2744c66504b7784bd89ea7e39c663 2024-11-20T22:24:42,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ab5644ae44574e92b55868a9c4f5fd45 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ab5644ae44574e92b55868a9c4f5fd45 2024-11-20T22:24:42,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/bff000b978914381b1a0619de0b68b58 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/bff000b978914381b1a0619de0b68b58 2024-11-20T22:24:42,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d97de9cd82454d49b1009ff07f0a9c77 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d97de9cd82454d49b1009ff07f0a9c77 2024-11-20T22:24:42,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d84a6e0e9eb44cdba0d5118daf7623f8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d84a6e0e9eb44cdba0d5118daf7623f8 2024-11-20T22:24:42,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2e709365ca2e4990b3ceddc0d768e4d2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2e709365ca2e4990b3ceddc0d768e4d2 2024-11-20T22:24:42,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ece1468cb3584a0e9185300c42724c41 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/ece1468cb3584a0e9185300c42724c41 2024-11-20T22:24:42,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/02e4af9142aa44e2889c2be7305f8338 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/02e4af9142aa44e2889c2be7305f8338 2024-11-20T22:24:42,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a93c92bcb98444bf929107d85e38eff9 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/a93c92bcb98444bf929107d85e38eff9 2024-11-20T22:24:42,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/7b7b7bb219af49a8823f73d3e0ab763a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/7b7b7bb219af49a8823f73d3e0ab763a 2024-11-20T22:24:42,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4eecb9f611154841a1dec7ef3fd7ee25 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/4eecb9f611154841a1dec7ef3fd7ee25 2024-11-20T22:24:42,805 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/recovered.edits/511.seqid, newMaxSeqId=511, maxSeqId=4 2024-11-20T22:24:42,806 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f. 
2024-11-20T22:24:42,806 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for fe73e78f2490c46e0778d445404a6f5f: 2024-11-20T22:24:42,808 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:42,808 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=fe73e78f2490c46e0778d445404a6f5f, regionState=CLOSED 2024-11-20T22:24:42,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T22:24:42,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure fe73e78f2490c46e0778d445404a6f5f, server=6365a1e51efd,46811,1732141422048 in 1.2790 sec 2024-11-20T22:24:42,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-11-20T22:24:42,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe73e78f2490c46e0778d445404a6f5f, UNASSIGN in 1.2830 sec 2024-11-20T22:24:42,814 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T22:24:42,814 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.2870 sec 2024-11-20T22:24:42,816 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141482816"}]},"ts":"1732141482816"} 2024-11-20T22:24:42,818 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:24:42,826 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:24:42,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.3140 sec 2024-11-20T22:24:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T22:24:43,622 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T22:24:43,623 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:24:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,628 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,629 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,629 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:43,635 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,640 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/recovered.edits] 2024-11-20T22:24:43,642 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2662833d38d74531b50db2dc370f83a2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/2662833d38d74531b50db2dc370f83a2 2024-11-20T22:24:43,644 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b760dd3a261d4b2ab20dfd57fc11d70e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/b760dd3a261d4b2ab20dfd57fc11d70e 2024-11-20T22:24:43,645 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bdc0628c26cb4bb6bb85a24979f69ce7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/A/bdc0628c26cb4bb6bb85a24979f69ce7 2024-11-20T22:24:43,648 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2dce8fdece5f417cbe39ef73afa4fb1c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2dce8fdece5f417cbe39ef73afa4fb1c 2024-11-20T22:24:43,649 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2ea038d6a2b44ffbb65803f6d6cf92e0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/2ea038d6a2b44ffbb65803f6d6cf92e0 
2024-11-20T22:24:43,661 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/5f87397878714589af66bf8bdbaa59a3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/B/5f87397878714589af66bf8bdbaa59a3 2024-11-20T22:24:43,665 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2939ef49bc3944adbfc3226adad00096 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/2939ef49bc3944adbfc3226adad00096 2024-11-20T22:24:43,666 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/5d090d65b2ac4f68a01e7d86649fb284 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/5d090d65b2ac4f68a01e7d86649fb284 2024-11-20T22:24:43,668 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d292a049752c44e6b33c00fb35e980d6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/C/d292a049752c44e6b33c00fb35e980d6 2024-11-20T22:24:43,671 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/recovered.edits/511.seqid to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f/recovered.edits/511.seqid 2024-11-20T22:24:43,673 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,673 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:24:43,673 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:24:43,674 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T22:24:43,682 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112002f3546568274db7b975dd9eb6b44bfb_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112002f3546568274db7b975dd9eb6b44bfb_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,685 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200eabb86c27474e4fa2600601935d59f2_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200eabb86c27474e4fa2600601935d59f2_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,686 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112012a7aff7f3c74497815b388eabfdbcf5_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112012a7aff7f3c74497815b388eabfdbcf5_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,688 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120144ead4bea1e48c8b25c583ade9b53e1_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120144ead4bea1e48c8b25c583ade9b53e1_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,691 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120280150315869469fa862f086ebbd42b4_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120280150315869469fa862f086ebbd42b4_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,694 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f04700efe69453081c77f96ac51aca6_fe73e78f2490c46e0778d445404a6f5f to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f04700efe69453081c77f96ac51aca6_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,696 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f8a14f8ff5b4c96abbfe8562c71344b_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f8a14f8ff5b4c96abbfe8562c71344b_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,698 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112032a6ad1b0bed4d51acb37a56e6bc7fca_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112032a6ad1b0bed4d51acb37a56e6bc7fca_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,714 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203a36d23cfc4645a3a24a202b1dff1487_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203a36d23cfc4645a3a24a202b1dff1487_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,722 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203aacf6cf221546b4908bbd3343d94744_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203aacf6cf221546b4908bbd3343d94744_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,726 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203b00e44fbdb343deb38eefcdb0fc4b5a_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203b00e44fbdb343deb38eefcdb0fc4b5a_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,728 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e4e71e9fb794fe5b30f3d4e980a7840_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e4e71e9fb794fe5b30f3d4e980a7840_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:43,732 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205c79d245f9cf4a7fbbdfef53933e4783_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205c79d245f9cf4a7fbbdfef53933e4783_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,734 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206e77fcd0535845ff99a015bc811531f3_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206e77fcd0535845ff99a015bc811531f3_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,742 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207ab0e0624cc440e0b94453a76db73564_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207ab0e0624cc440e0b94453a76db73564_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,744 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209115d962e5554718b29894c29bed0994_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209115d962e5554718b29894c29bed0994_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,753 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2869cc01de04abb96199fb8c448cd44_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2869cc01de04abb96199fb8c448cd44_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,755 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b12b298be8e945d193bb0ee5480b293b_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b12b298be8e945d193bb0ee5480b293b_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,758 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bec71bb475bf4cffa956572e2b076c91_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bec71bb475bf4cffa956572e2b076c91_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,761 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d32a79bbbe264b5986b9459f7b763312_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d32a79bbbe264b5986b9459f7b763312_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,764 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d5ca6f9f55d948179221450269267513_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d5ca6f9f55d948179221450269267513_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,766 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d748393b7f704028842b676ed3e45a8c_fe73e78f2490c46e0778d445404a6f5f to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d748393b7f704028842b676ed3e45a8c_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,771 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da2a06ac8d364527ab7707aab82d3f40_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da2a06ac8d364527ab7707aab82d3f40_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,779 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dace773c917a44cbb8c5fae8b22523e8_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dace773c917a44cbb8c5fae8b22523e8_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,794 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f292715b3050476b8a717f39b0101a2b_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f292715b3050476b8a717f39b0101a2b_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,798 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fe74eb33df244487af51d016de74859c_fe73e78f2490c46e0778d445404a6f5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fe74eb33df244487af51d016de74859c_fe73e78f2490c46e0778d445404a6f5f 2024-11-20T22:24:43,801 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:24:43,807 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,811 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:24:43,823 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
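The archiving traced above is the server-side work of DeleteTableProcedure (pid=63). For reference, a minimal client-side sketch of the call that triggers it, using the standard HBase Admin API; the table name is taken from the log, everything else (class name, connection setup) is illustrative and not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn); // a table must be disabled before it can be deleted
        }
        // The master then runs DeleteTableProcedure: unassign regions, archive the
        // store files (the HFileArchiver "Archived from ... to archive/" entries above),
        // and finally remove the table's rows and state from hbase:meta.
        admin.deleteTable(tn);
      }
    }
  }
}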
2024-11-20T22:24:43,828 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,828 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:24:43,828 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141483828"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:43,832 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:24:43,832 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fe73e78f2490c46e0778d445404a6f5f, NAME => 'TestAcidGuarantees,,1732141454370.fe73e78f2490c46e0778d445404a6f5f.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:24:43,832 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:24:43,832 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141483832"}]},"ts":"9223372036854775807"} 2024-11-20T22:24:43,848 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:24:43,860 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 237 msec 2024-11-20T22:24:43,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T22:24:43,931 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T22:24:43,944 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=241 (was 239) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1365377924_22 at /127.0.0.1:35390 [Waiting for operation #281] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1365377924_22 at /127.0.0.1:60948 [Waiting for operation #311] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-424572154_22 at /127.0.0.1:35908 [Waiting for operation #137] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2514ba89-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=461 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1118 (was 1010) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2131 (was 1603) - AvailableMemoryMB LEAK? - 2024-11-20T22:24:43,960 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=241, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=1118, ProcessCount=11, AvailableMemoryMB=2130 2024-11-20T22:24:43,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
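The TableDescriptorChecker warning just above fires because the test table carries a deliberately tiny memstore flush threshold (131072 bytes) — far below the 128 MB default for "hbase.hregion.memstore.flush.size" — so regions flush very frequently. The value can come either from the site configuration or from the table descriptor; the sketch below shows both routes through the public API and is purely illustrative, not the test's setup code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeSketch {
  public static void main(String[] args) {
    // Site-configuration route: lower the region flush threshold cluster-wide.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L); // 128 KB vs. the 128 MB default

    // Per-table route: the same value carried as MEMSTORE_FLUSHSIZE in the descriptor,
    // which is exactly what TableDescriptorChecker inspects.
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(131072L);
    System.out.println("flush size = " + builder.build().getMemStoreFlushSize());
  }
}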
2024-11-20T22:24:43,962 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:24:43,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:24:43,968 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:24:43,968 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:43,968 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-11-20T22:24:43,969 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:24:43,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T22:24:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742085_1261 (size=963) 2024-11-20T22:24:44,025 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:24:44,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742086_1262 (size=53) 2024-11-20T22:24:44,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T22:24:44,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T22:24:44,450 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:44,450 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5f1e2878eea2034576ba469d1952fe84, disabling compactions & flushes 2024-11-20T22:24:44,450 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:44,450 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:44,450 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. after waiting 0 ms 2024-11-20T22:24:44,450 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:44,450 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:44,450 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:44,451 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:24:44,452 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141484451"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141484451"}]},"ts":"1732141484451"} 2024-11-20T22:24:44,453 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
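The descriptor dumped above (ADAPTIVE in-memory compaction via table metadata, families A/B/C with a single version each) corresponds roughly to the following client-side construction. This is a hedged sketch of the public descriptor-builder API, not the test's actual setup code; the helper name is invented:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTestTable(Admin admin) throws Exception {
    TableDescriptorBuilder table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata as shown in the log; a per-family alternative would be
        // ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE).
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
          .build());
    }
    // Stores pid=64 (CreateTableProcedure) on the master; the client then polls
    // "Checking to see if procedure is done pid=64" as seen in the log.
    admin.createTable(table.build());
  }
}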
2024-11-20T22:24:44,454 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:24:44,454 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141484454"}]},"ts":"1732141484454"} 2024-11-20T22:24:44,455 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:24:44,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, ASSIGN}] 2024-11-20T22:24:44,502 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, ASSIGN 2024-11-20T22:24:44,503 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:24:44,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T22:24:44,654 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=5f1e2878eea2034576ba469d1952fe84, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:44,655 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:24:44,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:44,811 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:44,811 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:24:44,812 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,812 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:24:44,812 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,812 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,813 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,819 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:44,819 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f1e2878eea2034576ba469d1952fe84 columnFamilyName A 2024-11-20T22:24:44,819 DEBUG [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:44,822 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.HStore(327): Store=5f1e2878eea2034576ba469d1952fe84/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:44,823 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,831 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:44,831 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f1e2878eea2034576ba469d1952fe84 columnFamilyName B 2024-11-20T22:24:44,831 DEBUG [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:44,832 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.HStore(327): Store=5f1e2878eea2034576ba469d1952fe84/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:44,832 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,833 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:24:44,833 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f1e2878eea2034576ba469d1952fe84 columnFamilyName C 2024-11-20T22:24:44,834 DEBUG [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:24:44,834 INFO [StoreOpener-5f1e2878eea2034576ba469d1952fe84-1 {}] regionserver.HStore(327): Store=5f1e2878eea2034576ba469d1952fe84/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:24:44,834 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:44,835 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,835 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,837 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:24:44,839 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:44,843 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:24:44,844 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 5f1e2878eea2034576ba469d1952fe84; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59970206, jitterRate=-0.10637429356575012}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:24:44,845 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:44,846 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., pid=66, masterSystemTime=1732141484807 2024-11-20T22:24:44,848 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:44,848 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
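At this point region 5f1e2878eea2034576ba469d1952fe84 is open and serving, and the entries that follow are the client connections set up for the testGetAtomicity workload. Roughly, the invariant that workload exercises is that a single-row Put spanning families A, B and C becomes visible to a Get either entirely or not at all. The sketch below is a hypothetical simplification of that check (row key, qualifier and helper names are invented), not the actual TestAcidGuarantees code:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetAtomicitySketch {
  static final byte[][] FAMILIES = { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
  static final byte[] QUAL = Bytes.toBytes("q");
  static final byte[] ROW = Bytes.toBytes("row0");

  // Writer: one Put spanning all three families; HBase applies it atomically per row.
  static void write(Table table, long stamp) throws Exception {
    Put put = new Put(ROW);
    byte[] value = Bytes.toBytes(stamp);
    for (byte[] family : FAMILIES) {
      put.addColumn(family, QUAL, value);
    }
    table.put(put);
  }

  // Reader: a Get must never observe a mix of old and new values across families.
  static void check(Table table) throws Exception {
    Result result = table.get(new Get(ROW));
    byte[] expected = result.getValue(FAMILIES[0], QUAL);
    for (byte[] family : FAMILIES) {
      if (!Arrays.equals(expected, result.getValue(family, QUAL))) {
        throw new AssertionError("torn read: families disagree for row " + Bytes.toString(ROW));
      }
    }
  }
}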
2024-11-20T22:24:44,848 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=5f1e2878eea2034576ba469d1952fe84, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:44,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T22:24:44,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 in 193 msec 2024-11-20T22:24:44,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-20T22:24:44,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, ASSIGN in 349 msec 2024-11-20T22:24:44,854 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:24:44,854 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141484854"}]},"ts":"1732141484854"} 2024-11-20T22:24:44,856 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:24:44,869 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:24:44,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 906 msec 2024-11-20T22:24:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T22:24:45,078 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-11-20T22:24:45,080 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d72231b to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@121031b3 2024-11-20T22:24:45,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5758757d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,113 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,115 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,117 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:24:45,119 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49716, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:24:45,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x033c112c to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10323551 2024-11-20T22:24:45,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45559b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,248 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cdbac5f to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dc28428 2024-11-20T22:24:45,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4374e0c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x165a32db to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f4017fc 2024-11-20T22:24:45,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b56a802, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,315 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d679835 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12779f92 2024-11-20T22:24:45,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c5f2366, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,335 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32416934 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ce4ebe1 2024-11-20T22:24:45,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49f5bf08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,378 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2cdd73 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc52e61 2024-11-20T22:24:45,408 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ca2a0a0, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,410 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35ef38cd to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@779bae37 2024-11-20T22:24:45,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e25b7c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73db2730 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@591bc56a 2024-11-20T22:24:45,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a43d21a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,479 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d32f3f9 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a0559a0 2024-11-20T22:24:45,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed287f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,499 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x16092bcc to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4691b8f4 2024-11-20T22:24:45,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c961a99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:24:45,541 DEBUG [hconnection-0x622392fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,543 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,543 DEBUG [hconnection-0x7e52362c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,545 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,567 DEBUG [hconnection-0x4985dafb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,571 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,591 DEBUG [hconnection-0x146dc893-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,592 DEBUG [master/6365a1e51efd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 523fbb796d2a39aa16176c6f447c7951 changed from -1.0 to 0.0, refreshing cache 2024-11-20T22:24:45,593 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:45,603 DEBUG [hconnection-0x25633912-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-20T22:24:45,604 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,604 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:45,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:45,607 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:45,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:45,611 DEBUG [hconnection-0x67767d34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:45,615 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:24:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,619 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:45,671 DEBUG [hconnection-0x8e26a97-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,673 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141545674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141545674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141545675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/48a449b6c7be41a998a64aa3df424dcc is 50, key is test_row_0/A:col10/1732141485614/Put/seqid=0 2024-11-20T22:24:45,693 DEBUG [hconnection-0x459e4330-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,695 DEBUG [hconnection-0x6c2c0577-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,697 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,700 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141545703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:45,714 DEBUG [hconnection-0x3680ed14-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:24:45,715 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:24:45,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141545717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:45,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:45,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:45,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:45,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:45,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742087_1263 (size=12001) 2024-11-20T22:24:45,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/48a449b6c7be41a998a64aa3df424dcc 2024-11-20T22:24:45,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141545779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141545781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141545779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141545808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141545823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/cc5d4600020c4888937d940486c109df is 50, key is test_row_0/B:col10/1732141485614/Put/seqid=0 2024-11-20T22:24:45,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742088_1264 (size=12001) 2024-11-20T22:24:45,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/cc5d4600020c4888937d940486c109df 2024-11-20T22:24:45,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:45,920 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:45,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:45,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:45,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:45,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:45,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d5866786c86f49d4936cf3f0c59c0718 is 50, key is test_row_0/C:col10/1732141485614/Put/seqid=0 2024-11-20T22:24:45,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742089_1265 (size=12001) 2024-11-20T22:24:45,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d5866786c86f49d4936cf3f0c59c0718 2024-11-20T22:24:45,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/48a449b6c7be41a998a64aa3df424dcc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/48a449b6c7be41a998a64aa3df424dcc 2024-11-20T22:24:45,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/48a449b6c7be41a998a64aa3df424dcc, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T22:24:45,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/cc5d4600020c4888937d940486c109df as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc5d4600020c4888937d940486c109df 2024-11-20T22:24:45,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc5d4600020c4888937d940486c109df, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T22:24:45,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141545987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141545988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:45,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d5866786c86f49d4936cf3f0c59c0718 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d5866786c86f49d4936cf3f0c59c0718 2024-11-20T22:24:45,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d5866786c86f49d4936cf3f0c59c0718, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T22:24:45,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141545989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5f1e2878eea2034576ba469d1952fe84 in 384ms, sequenceid=16, compaction requested=false 2024-11-20T22:24:46,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:46,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:46,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:46,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/d94b94443a67423fbe7be6968534af37 is 50, key is test_row_0/A:col10/1732141486015/Put/seqid=0 2024-11-20T22:24:46,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742090_1266 (size=12001) 2024-11-20T22:24:46,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/d94b94443a67423fbe7be6968534af37 2024-11-20T22:24:46,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141546091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141546094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0d42d550a99d43989b4a8e06c41a1b2f is 50, key is test_row_0/B:col10/1732141486015/Put/seqid=0 2024-11-20T22:24:46,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742091_1267 (size=12001) 2024-11-20T22:24:46,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141546201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141546203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:46,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:46,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:46,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141546293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141546303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141546294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,388 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:46,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:46,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:46,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141546406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141546407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,559 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:46,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:46,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:46,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0d42d550a99d43989b4a8e06c41a1b2f 2024-11-20T22:24:46,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/7024baac5d1740fca4d95ff715bb54ad is 50, key is test_row_0/C:col10/1732141486015/Put/seqid=0 2024-11-20T22:24:46,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742092_1268 (size=12001) 2024-11-20T22:24:46,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/7024baac5d1740fca4d95ff715bb54ad 2024-11-20T22:24:46,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/d94b94443a67423fbe7be6968534af37 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d94b94443a67423fbe7be6968534af37 2024-11-20T22:24:46,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d94b94443a67423fbe7be6968534af37, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:24:46,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0d42d550a99d43989b4a8e06c41a1b2f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d42d550a99d43989b4a8e06c41a1b2f 2024-11-20T22:24:46,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d42d550a99d43989b4a8e06c41a1b2f, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:24:46,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/7024baac5d1740fca4d95ff715bb54ad as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/7024baac5d1740fca4d95ff715bb54ad 2024-11-20T22:24:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:46,714 DEBUG [RSProcedureDispatcher-pool-1 
{}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:46,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:46,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:46,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/7024baac5d1740fca4d95ff715bb54ad, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:24:46,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5f1e2878eea2034576ba469d1952fe84 in 694ms, sequenceid=38, compaction requested=false 2024-11-20T22:24:46,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:46,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:46,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:46,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:46,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:46,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:46,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e287cab04dee4b87813babc4946884b6 is 50, key is test_row_0/A:col10/1732141486073/Put/seqid=0 2024-11-20T22:24:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742093_1269 (size=14341) 2024-11-20T22:24:46,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:46,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
as already flushing 2024-11-20T22:24:46,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:46,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:46,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141546860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141546863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141546864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141546883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141546920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141546984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141546987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:46,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:46,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141546994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141546995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,020 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:47,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:47,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:47,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141547027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:47,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e287cab04dee4b87813babc4946884b6 2024-11-20T22:24:47,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/ab23ef6427874597b9f4af8121faef30 is 50, key is test_row_0/B:col10/1732141486073/Put/seqid=0 2024-11-20T22:24:47,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141547196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141547201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141547203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141547205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742094_1270 (size=12001) 2024-11-20T22:24:47,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/ab23ef6427874597b9f4af8121faef30 2024-11-20T22:24:47,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141547236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/ba55bad020d346edb3d220171c33aef6 is 50, key is test_row_0/C:col10/1732141486073/Put/seqid=0 2024-11-20T22:24:47,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742095_1271 (size=12001) 2024-11-20T22:24:47,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:47,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:47,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:47,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:47,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141547503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141547507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141547508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141547511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:47,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141547543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:47,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:47,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
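The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore sits over its 512.0 K blocking limit, and they keep appearing until a flush frees space. As a rough illustration only (not code from this test), the Java sketch below shows how a caller using the standard HBase client API could absorb such rejections with a bounded backoff. The table name TestAcidGuarantees, family A, qualifier col10 and row test_row_0 are taken from the log; the retry cap, sleep values, and the cause-chain check are arbitrary assumptions, and the stock client already retries this condition internally, so an explicit loop like this only matters once those retries are exhausted.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family A / qualifier col10 appear in the flush records above.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempt = 0;
      while (true) {
        try {
          table.put(put);   // may fail while the memstore is over its blocking limit
          break;
        } catch (IOException ioe) {
          // The busy signal may arrive directly or wrapped once client retries are
          // exhausted, so walk the cause chain (assumption, not version-specific fact).
          boolean busy = false;
          for (Throwable t = ioe; t != null; t = t.getCause()) {
            if (t instanceof RegionTooBusyException) { busy = true; break; }
          }
          if (!busy || ++attempt > 5) throw ioe;   // arbitrary cap for the sketch
          Thread.sleep(100L * attempt);            // arbitrary linear backoff
        }
      }
    }
  }
}

Backing off on the client side is generally preferable to simply raising the blocking multiplier, since the block exists precisely to let flushes catch up with the write load.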
2024-11-20T22:24:47,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:47,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/ba55bad020d346edb3d220171c33aef6 2024-11-20T22:24:47,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e287cab04dee4b87813babc4946884b6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e287cab04dee4b87813babc4946884b6 2024-11-20T22:24:47,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e287cab04dee4b87813babc4946884b6, entries=200, sequenceid=53, filesize=14.0 K 2024-11-20T22:24:47,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/ab23ef6427874597b9f4af8121faef30 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ab23ef6427874597b9f4af8121faef30 2024-11-20T22:24:47,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ab23ef6427874597b9f4af8121faef30, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T22:24:47,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/ba55bad020d346edb3d220171c33aef6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ba55bad020d346edb3d220171c33aef6 2024-11-20T22:24:47,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ba55bad020d346edb3d220171c33aef6, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T22:24:47,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 5f1e2878eea2034576ba469d1952fe84 in 980ms, sequenceid=53, compaction requested=true 2024-11-20T22:24:47,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:47,705 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:47,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:47,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:47,706 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:47,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:47,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:47,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:47,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:47,707 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:47,707 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:24:47,707 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
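The compaction-selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") and the 512.0 K memstore limit are governed by a handful of region-server settings. The sketch below is only a reminder of where those knobs live, not a reproduction of this test's configuration; the literal values are assumptions chosen to be consistent with what the log shows (flush size times block multiplier equal to 512 KB, 16 blocking store files, a minimum of 3 files per compaction).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionKnobsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes are blocked once a region's memstore exceeds flush.size * block.multiplier.
    // 128 KB * 4 = 512 KB would match "Over memstore limit=512.0 K" above (assumed values).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // "16 blocking" in the selection log corresponds to the blocking store-file count.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Three eligible files being compacted together reflects the per-compaction minimum.
    conf.setInt("hbase.hstore.compaction.min", 3);
    System.out.println("memstore flush size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", -1));
  }
}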
2024-11-20T22:24:47,707 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/48a449b6c7be41a998a64aa3df424dcc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d94b94443a67423fbe7be6968534af37, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e287cab04dee4b87813babc4946884b6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=37.4 K 2024-11-20T22:24:47,707 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:47,707 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:24:47,707 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:47,707 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48a449b6c7be41a998a64aa3df424dcc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732141485614 2024-11-20T22:24:47,708 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d94b94443a67423fbe7be6968534af37, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141485656 2024-11-20T22:24:47,709 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e287cab04dee4b87813babc4946884b6, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141486072 2024-11-20T22:24:47,710 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc5d4600020c4888937d940486c109df, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d42d550a99d43989b4a8e06c41a1b2f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ab23ef6427874597b9f4af8121faef30] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.2 K 2024-11-20T22:24:47,710 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting cc5d4600020c4888937d940486c109df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732141485614 2024-11-20T22:24:47,710 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0d42d550a99d43989b4a8e06c41a1b2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141485656 2024-11-20T22:24:47,711 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ab23ef6427874597b9f4af8121faef30, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141486072 2024-11-20T22:24:47,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:47,733 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:47,733 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/4855ec64aa3a4e849357152f4714fa42 is 50, key is test_row_0/A:col10/1732141486073/Put/seqid=0 2024-11-20T22:24:47,749 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:47,749 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/10684efff1954ed196dd431054c5cf1f is 50, key is test_row_0/B:col10/1732141486073/Put/seqid=0 2024-11-20T22:24:47,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742096_1272 (size=12104) 2024-11-20T22:24:47,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742097_1273 (size=12104) 2024-11-20T22:24:47,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:47,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T22:24:47,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
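The dispatcher has now re-sent the flush procedure (pid=68) to the region server; the earlier attempts failed only because the region reported it was already flushing, so the master keeps retrying until a flush actually completes. For orientation, the hedged sketch below shows the ordinary client-facing way to request such a flush through the Admin API; whether this run's flush was triggered that way or directly by the test harness is not visible in the excerpt, and the master-side retry loop itself is internal and not reproduced here.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a memstore flush of every region of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}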
2024-11-20T22:24:47,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:47,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:47,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:47,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:47,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:47,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:47,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:47,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/0be0913081834c738459b993ef7a7b36 is 50, key is test_row_0/A:col10/1732141486859/Put/seqid=0 2024-11-20T22:24:47,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742098_1274 (size=12001) 2024-11-20T22:24:47,848 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/0be0913081834c738459b993ef7a7b36 2024-11-20T22:24:47,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/32c854cb05e448eb99f5a785244355a0 is 50, key is test_row_0/B:col10/1732141486859/Put/seqid=0 2024-11-20T22:24:47,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742099_1275 (size=12001) 2024-11-20T22:24:48,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:48,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
as already flushing 2024-11-20T22:24:48,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141548042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141548043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141548040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141548049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141548055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141548148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141548149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141548149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141548171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,179 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/4855ec64aa3a4e849357152f4714fa42 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/4855ec64aa3a4e849357152f4714fa42 2024-11-20T22:24:48,184 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into 4855ec64aa3a4e849357152f4714fa42(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
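The minor compaction of store A has completed, rewriting the three eligible HFiles into a single 11.8 K file; stores B and C follow the same path in the records below. Purely as an illustration (this test relies on the automatically queued requests seen above), the sketch below asks for a table compaction through the Admin API and polls until the servers report no compaction in progress; the polling interval and the choice of compact over majorCompact are arbitrary.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.compact(table);   // queue a (minor) compaction request for all regions
      // Poll until the region servers report no compaction in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);    // illustrative polling interval
      }
    }
  }
}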
2024-11-20T22:24:48,184 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:48,184 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141487705; duration=0sec 2024-11-20T22:24:48,185 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:48,185 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:24:48,185 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:48,189 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/10684efff1954ed196dd431054c5cf1f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/10684efff1954ed196dd431054c5cf1f 2024-11-20T22:24:48,191 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:48,191 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:24:48,191 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:48,191 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d5866786c86f49d4936cf3f0c59c0718, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/7024baac5d1740fca4d95ff715bb54ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ba55bad020d346edb3d220171c33aef6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.2 K 2024-11-20T22:24:48,192 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5866786c86f49d4936cf3f0c59c0718, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732141485614 2024-11-20T22:24:48,192 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7024baac5d1740fca4d95ff715bb54ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141485656 2024-11-20T22:24:48,193 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba55bad020d346edb3d220171c33aef6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141486072 2024-11-20T22:24:48,195 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into 10684efff1954ed196dd431054c5cf1f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:48,195 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:48,195 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141487706; duration=0sec 2024-11-20T22:24:48,195 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:48,196 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:24:48,205 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:48,206 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/5554b7f91ad94d6fb2cc09a546011188 is 50, key is test_row_0/C:col10/1732141486073/Put/seqid=0 2024-11-20T22:24:48,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742100_1276 (size=12104) 2024-11-20T22:24:48,217 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/5554b7f91ad94d6fb2cc09a546011188 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5554b7f91ad94d6fb2cc09a546011188 2024-11-20T22:24:48,224 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into 5554b7f91ad94d6fb2cc09a546011188(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:48,224 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:48,225 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=13, startTime=1732141487706; duration=0sec 2024-11-20T22:24:48,225 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:48,225 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:24:48,266 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/32c854cb05e448eb99f5a785244355a0 2024-11-20T22:24:48,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/b0874c8060e448e287dc77234ea2754a is 50, key is test_row_0/C:col10/1732141486859/Put/seqid=0 2024-11-20T22:24:48,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742101_1277 (size=12001) 2024-11-20T22:24:48,319 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/b0874c8060e448e287dc77234ea2754a 2024-11-20T22:24:48,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/0be0913081834c738459b993ef7a7b36 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/0be0913081834c738459b993ef7a7b36 2024-11-20T22:24:48,340 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/0be0913081834c738459b993ef7a7b36, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:24:48,346 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:24:48,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/32c854cb05e448eb99f5a785244355a0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/32c854cb05e448eb99f5a785244355a0 2024-11-20T22:24:48,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141548354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141548355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,361 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/32c854cb05e448eb99f5a785244355a0, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:24:48,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/b0874c8060e448e287dc77234ea2754a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/b0874c8060e448e287dc77234ea2754a 2024-11-20T22:24:48,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141548359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,376 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/b0874c8060e448e287dc77234ea2754a, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:24:48,380 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5f1e2878eea2034576ba469d1952fe84 in 586ms, sequenceid=75, compaction requested=false 2024-11-20T22:24:48,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:48,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:48,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-20T22:24:48,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-20T22:24:48,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:48,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:48,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:48,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:48,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:48,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:48,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:48,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:48,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-20T22:24:48,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7810 sec 2024-11-20T22:24:48,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.7930 sec 2024-11-20T22:24:48,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/858420f45528492d889fda6b7b33d172 is 50, key is test_row_0/A:col10/1732141488041/Put/seqid=0 2024-11-20T22:24:48,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742102_1278 (size=12001) 2024-11-20T22:24:48,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141548496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141548603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141548659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141548659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141548680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:48,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141548808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:48,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/858420f45528492d889fda6b7b33d172 2024-11-20T22:24:48,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/c425134f582d4fedb124c3a14debab63 is 50, key is test_row_0/B:col10/1732141488041/Put/seqid=0 2024-11-20T22:24:48,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742103_1279 (size=12001) 2024-11-20T22:24:48,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/c425134f582d4fedb124c3a14debab63 2024-11-20T22:24:48,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/4a2e1c70cc834023904a02534b42d6d1 is 50, key is test_row_0/C:col10/1732141488041/Put/seqid=0 2024-11-20T22:24:48,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742104_1280 (size=12001) 2024-11-20T22:24:48,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/4a2e1c70cc834023904a02534b42d6d1 2024-11-20T22:24:49,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/858420f45528492d889fda6b7b33d172 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/858420f45528492d889fda6b7b33d172 2024-11-20T22:24:49,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/858420f45528492d889fda6b7b33d172, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:24:49,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/c425134f582d4fedb124c3a14debab63 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/c425134f582d4fedb124c3a14debab63 2024-11-20T22:24:49,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/c425134f582d4fedb124c3a14debab63, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:24:49,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/4a2e1c70cc834023904a02534b42d6d1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4a2e1c70cc834023904a02534b42d6d1 2024-11-20T22:24:49,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4a2e1c70cc834023904a02534b42d6d1, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T22:24:49,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5f1e2878eea2034576ba469d1952fe84 in 652ms, sequenceid=93, compaction requested=true 2024-11-20T22:24:49,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:49,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:49,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:49,039 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:49,039 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:49,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store 
size is 2 2024-11-20T22:24:49,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:49,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:49,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:49,042 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:49,042 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:49,042 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:24:49,042 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:24:49,042 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:49,042 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:49,042 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/4855ec64aa3a4e849357152f4714fa42, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/0be0913081834c738459b993ef7a7b36, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/858420f45528492d889fda6b7b33d172] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.3 K 2024-11-20T22:24:49,042 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/10684efff1954ed196dd431054c5cf1f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/32c854cb05e448eb99f5a785244355a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/c425134f582d4fedb124c3a14debab63] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.3 K 2024-11-20T22:24:49,044 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 10684efff1954ed196dd431054c5cf1f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141486072 2024-11-20T22:24:49,044 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4855ec64aa3a4e849357152f4714fa42, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141486072 2024-11-20T22:24:49,045 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0be0913081834c738459b993ef7a7b36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141486859 2024-11-20T22:24:49,045 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 32c854cb05e448eb99f5a785244355a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141486859 2024-11-20T22:24:49,046 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 858420f45528492d889fda6b7b33d172, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141488034 2024-11-20T22:24:49,046 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c425134f582d4fedb124c3a14debab63, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141488034 2024-11-20T22:24:49,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:49,056 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:49,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:49,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:49,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:49,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:49,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:49,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:49,090 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#225 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:49,091 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/507037353c5f4aa485c871ee27776012 is 50, key is test_row_0/B:col10/1732141488041/Put/seqid=0 2024-11-20T22:24:49,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81 is 50, key is test_row_0/A:col10/1732141488438/Put/seqid=0 2024-11-20T22:24:49,109 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#227 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:49,109 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/21677a04495548e0af26dd598cb26a2a is 50, key is test_row_0/A:col10/1732141488041/Put/seqid=0 2024-11-20T22:24:49,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141549115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141549117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742105_1281 (size=12207) 2024-11-20T22:24:49,139 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/507037353c5f4aa485c871ee27776012 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/507037353c5f4aa485c871ee27776012 2024-11-20T22:24:49,145 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into 507037353c5f4aa485c871ee27776012(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:49,145 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:49,145 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141489039; duration=0sec 2024-11-20T22:24:49,145 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:49,146 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:24:49,146 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:49,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742107_1283 (size=12207) 2024-11-20T22:24:49,148 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:49,149 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:24:49,149 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:49,149 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5554b7f91ad94d6fb2cc09a546011188, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/b0874c8060e448e287dc77234ea2754a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4a2e1c70cc834023904a02534b42d6d1] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.3 K 2024-11-20T22:24:49,150 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5554b7f91ad94d6fb2cc09a546011188, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732141486072 2024-11-20T22:24:49,150 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b0874c8060e448e287dc77234ea2754a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141486859 2024-11-20T22:24:49,151 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a2e1c70cc834023904a02534b42d6d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141488034 2024-11-20T22:24:49,152 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/21677a04495548e0af26dd598cb26a2a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/21677a04495548e0af26dd598cb26a2a 2024-11-20T22:24:49,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742106_1282 (size=12001) 2024-11-20T22:24:49,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81 2024-11-20T22:24:49,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141549164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,167 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into 21677a04495548e0af26dd598cb26a2a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:49,167 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:49,167 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141489039; duration=0sec 2024-11-20T22:24:49,167 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:49,167 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:24:49,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/268065b24ea645ab8f7ff2d2014a0dc7 is 50, key is test_row_0/B:col10/1732141488438/Put/seqid=0 2024-11-20T22:24:49,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141549167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,173 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:49,174 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/49bb991089b34fb98823291d5fa75552 is 50, key is test_row_0/C:col10/1732141488041/Put/seqid=0 2024-11-20T22:24:49,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742108_1284 (size=12001) 2024-11-20T22:24:49,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/268065b24ea645ab8f7ff2d2014a0dc7 2024-11-20T22:24:49,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/518c9df8771a4a18b04101edab4e1ec0 is 50, key is test_row_0/C:col10/1732141488438/Put/seqid=0 2024-11-20T22:24:49,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141549194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742109_1285 (size=12207) 2024-11-20T22:24:49,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141549221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742110_1286 (size=12001) 2024-11-20T22:24:49,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/518c9df8771a4a18b04101edab4e1ec0 2024-11-20T22:24:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81 2024-11-20T22:24:49,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81, entries=150, sequenceid=115, filesize=11.7 K 2024-11-20T22:24:49,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/268065b24ea645ab8f7ff2d2014a0dc7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/268065b24ea645ab8f7ff2d2014a0dc7 2024-11-20T22:24:49,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/268065b24ea645ab8f7ff2d2014a0dc7, entries=150, sequenceid=115, filesize=11.7 K 2024-11-20T22:24:49,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/518c9df8771a4a18b04101edab4e1ec0 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/518c9df8771a4a18b04101edab4e1ec0 2024-11-20T22:24:49,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/518c9df8771a4a18b04101edab4e1ec0, entries=150, sequenceid=115, filesize=11.7 K 2024-11-20T22:24:49,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5f1e2878eea2034576ba469d1952fe84 in 218ms, sequenceid=115, compaction requested=false 2024-11-20T22:24:49,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:49,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:49,432 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:49,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:49,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:49,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:49,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:49,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:49,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:49,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/ac4601de858b4e068ea5e7d7c3a366c3 is 50, key is test_row_0/A:col10/1732141489095/Put/seqid=0 2024-11-20T22:24:49,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742111_1287 (size=14391) 2024-11-20T22:24:49,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141549544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,610 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/49bb991089b34fb98823291d5fa75552 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/49bb991089b34fb98823291d5fa75552 2024-11-20T22:24:49,619 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into 49bb991089b34fb98823291d5fa75552(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:49,620 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:49,620 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=13, startTime=1732141489040; duration=0sec 2024-11-20T22:24:49,620 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:49,620 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:24:49,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141549623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141549647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T22:24:49,720 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T22:24:49,720 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:49,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-20T22:24:49,722 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:49,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:49,722 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:49,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:49,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:49,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141549854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,875 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:49,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:49,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
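The RegionTooBusyException warnings above are raised by HRegion.checkResources() once the region's memstore passes its blocking size, which HBase derives from the per-region flush size multiplied by the block multiplier (the 512.0 K limit in this log implies the test is running with a deliberately tiny flush size). Below is a minimal Java sketch of that relationship, assuming the stock property names and defaults; the class name is hypothetical and the code is illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB) and the multiplier (default 4)
    // that together give the blocking limit enforced in HRegion.checkResources().
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    // Writes receive RegionTooBusyException once the memstore exceeds
    // flushSize * multiplier (512.0 K in the log above) until a flush drains it.
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}

Clients normally do not see this exception directly; the stock HBase client treats it as retryable and backs off before resubmitting the mutate, which is consistent with the same connections reappearing above with increasing callIds.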
2024-11-20T22:24:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:49,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:49,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/ac4601de858b4e068ea5e7d7c3a366c3 2024-11-20T22:24:49,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/a4a4e8a3e13f4a51979f1319ee499774 is 50, key is test_row_0/B:col10/1732141489095/Put/seqid=0 2024-11-20T22:24:50,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742112_1288 (size=12051) 2024-11-20T22:24:50,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/a4a4e8a3e13f4a51979f1319ee499774 2024-11-20T22:24:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:50,037 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/78dd60cd99e3480f9b3604da4d56dbd4 is 50, key is test_row_0/C:col10/1732141489095/Put/seqid=0 2024-11-20T22:24:50,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742113_1289 (size=12051) 2024-11-20T22:24:50,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/78dd60cd99e3480f9b3604da4d56dbd4 2024-11-20T22:24:50,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/ac4601de858b4e068ea5e7d7c3a366c3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ac4601de858b4e068ea5e7d7c3a366c3 2024-11-20T22:24:50,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ac4601de858b4e068ea5e7d7c3a366c3, entries=200, sequenceid=132, filesize=14.1 K 2024-11-20T22:24:50,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/a4a4e8a3e13f4a51979f1319ee499774 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a4a4e8a3e13f4a51979f1319ee499774 2024-11-20T22:24:50,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a4a4e8a3e13f4a51979f1319ee499774, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T22:24:50,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/78dd60cd99e3480f9b3604da4d56dbd4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/78dd60cd99e3480f9b3604da4d56dbd4 2024-11-20T22:24:50,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/78dd60cd99e3480f9b3604da4d56dbd4, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T22:24:50,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5f1e2878eea2034576ba469d1952fe84 in 705ms, sequenceid=132, compaction requested=true 2024-11-20T22:24:50,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:50,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:50,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:50,137 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:50,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:50,138 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:50,141 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:50,141 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:24:50,141 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,141 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/21677a04495548e0af26dd598cb26a2a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ac4601de858b4e068ea5e7d7c3a366c3] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=37.7 K 2024-11-20T22:24:50,142 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:50,142 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:24:50,142 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:50,142 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/507037353c5f4aa485c871ee27776012, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/268065b24ea645ab8f7ff2d2014a0dc7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a4a4e8a3e13f4a51979f1319ee499774] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.4 K 2024-11-20T22:24:50,143 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21677a04495548e0af26dd598cb26a2a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141488034 2024-11-20T22:24:50,143 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 507037353c5f4aa485c871ee27776012, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141488034 2024-11-20T22:24:50,148 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfaf9f3cd98b4bfa90ce83ccc2cf9d81, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732141488438 2024-11-20T22:24:50,149 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 268065b24ea645ab8f7ff2d2014a0dc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732141488438 2024-11-20T22:24:50,149 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac4601de858b4e068ea5e7d7c3a366c3, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141489095 2024-11-20T22:24:50,152 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a4a4e8a3e13f4a51979f1319ee499774, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141489095 2024-11-20T22:24:50,165 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:50,166 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/471fd453e75b44869d965d1f5cb40d5a is 50, key is test_row_0/A:col10/1732141489095/Put/seqid=0 2024-11-20T22:24:50,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:50,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:50,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:50,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:50,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:50,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:50,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:50,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:50,181 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:50,181 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/7cd120569a5d4b5b9c8f2dc43fba8a06 is 50, key is test_row_0/B:col10/1732141489095/Put/seqid=0 2024-11-20T22:24:50,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/6a0d4d205e49499c83be324d244f32dd is 50, key is test_row_0/A:col10/1732141489499/Put/seqid=0 2024-11-20T22:24:50,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742114_1290 (size=12359) 2024-11-20T22:24:50,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:50,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
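The pid=70 failures above are the remote FlushRegionCallable refusing to run because the MemStoreFlusher already has a flush in flight for the region ("NOT flushing ... as already flushing"); the master records the RemoteProcedureException and re-dispatches the sub-procedure until a later attempt succeeds. The request itself originates from an Admin-level table flush like the one recorded for procId 67. A rough Java sketch of that call, assuming a standard client setup; the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this into a FlushTableProcedure with one
      // FlushRegionProcedure per region, as seen for pid=69/70 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}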
2024-11-20T22:24:50,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,201 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/471fd453e75b44869d965d1f5cb40d5a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/471fd453e75b44869d965d1f5cb40d5a 2024-11-20T22:24:50,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141550200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742116_1292 (size=12151) 2024-11-20T22:24:50,209 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into 471fd453e75b44869d965d1f5cb40d5a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:50,209 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:50,209 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141490137; duration=0sec 2024-11-20T22:24:50,209 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:50,209 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:24:50,209 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:50,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141550209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,211 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:50,211 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:24:50,211 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:50,211 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/49bb991089b34fb98823291d5fa75552, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/518c9df8771a4a18b04101edab4e1ec0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/78dd60cd99e3480f9b3604da4d56dbd4] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=35.4 K 2024-11-20T22:24:50,212 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49bb991089b34fb98823291d5fa75552, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732141488034 2024-11-20T22:24:50,212 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 518c9df8771a4a18b04101edab4e1ec0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732141488438 2024-11-20T22:24:50,213 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78dd60cd99e3480f9b3604da4d56dbd4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141489095 2024-11-20T22:24:50,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742115_1291 (size=12359) 2024-11-20T22:24:50,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141550210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,220 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#237 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:50,221 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e1696c1cf7094319a3f80bba80ec6ee3 is 50, key is test_row_0/C:col10/1732141489095/Put/seqid=0 2024-11-20T22:24:50,222 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/7cd120569a5d4b5b9c8f2dc43fba8a06 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/7cd120569a5d4b5b9c8f2dc43fba8a06 2024-11-20T22:24:50,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141550209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742117_1293 (size=12359) 2024-11-20T22:24:50,240 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into 7cd120569a5d4b5b9c8f2dc43fba8a06(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:50,240 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:50,240 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141490137; duration=0sec 2024-11-20T22:24:50,240 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:50,240 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:24:50,241 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e1696c1cf7094319a3f80bba80ec6ee3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e1696c1cf7094319a3f80bba80ec6ee3 2024-11-20T22:24:50,249 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into e1696c1cf7094319a3f80bba80ec6ee3(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:50,249 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:50,249 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=13, startTime=1732141490138; duration=0sec 2024-11-20T22:24:50,249 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:50,249 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:24:50,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141550306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141550313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141550328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:50,351 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:50,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,507 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141550512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141550517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141550546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/6a0d4d205e49499c83be324d244f32dd 2024-11-20T22:24:50,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/68bdb1127e4a43af9c04887908bbf0f3 is 50, key is test_row_0/B:col10/1732141489499/Put/seqid=0 2024-11-20T22:24:50,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742118_1294 (size=12151) 2024-11-20T22:24:50,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141550637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:50,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,813 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141550815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141550819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:50,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:50,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141550854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,970 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:50,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:50,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:50,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:50,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:50,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:51,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/68bdb1127e4a43af9c04887908bbf0f3 2024-11-20T22:24:51,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/fec79bd8cea2410ba725ebd0d77e74e8 is 50, key is test_row_0/C:col10/1732141489499/Put/seqid=0 2024-11-20T22:24:51,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742119_1295 (size=12151) 2024-11-20T22:24:51,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/fec79bd8cea2410ba725ebd0d77e74e8 2024-11-20T22:24:51,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/6a0d4d205e49499c83be324d244f32dd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6a0d4d205e49499c83be324d244f32dd 2024-11-20T22:24:51,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6a0d4d205e49499c83be324d244f32dd, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T22:24:51,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/68bdb1127e4a43af9c04887908bbf0f3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/68bdb1127e4a43af9c04887908bbf0f3 2024-11-20T22:24:51,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/68bdb1127e4a43af9c04887908bbf0f3, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T22:24:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/fec79bd8cea2410ba725ebd0d77e74e8 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fec79bd8cea2410ba725ebd0d77e74e8 2024-11-20T22:24:51,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fec79bd8cea2410ba725ebd0d77e74e8, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T22:24:51,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 5f1e2878eea2034576ba469d1952fe84 in 948ms, sequenceid=155, compaction requested=false 2024-11-20T22:24:51,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:51,122 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T22:24:51,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:51,122 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:51,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:51,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:51,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:51,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:51,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:51,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:51,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/db0a6429d9da43639dace47c16a763dd is 50, key is test_row_0/A:col10/1732141490203/Put/seqid=0 2024-11-20T22:24:51,140 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742120_1296 (size=12151) 2024-11-20T22:24:51,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:51,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:51,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141551368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141551369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141551369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141551472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141551472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141551472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,540 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/db0a6429d9da43639dace47c16a763dd 2024-11-20T22:24:51,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/df2ebd9e25fd43f5b3577eece3f0237f is 50, key is test_row_0/B:col10/1732141490203/Put/seqid=0 2024-11-20T22:24:51,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742121_1297 (size=12151) 2024-11-20T22:24:51,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141551674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141551676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141551676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:51,954 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/df2ebd9e25fd43f5b3577eece3f0237f 2024-11-20T22:24:51,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/57e4a0d1bf854806924b9d246355b2ab is 50, key is test_row_0/C:col10/1732141490203/Put/seqid=0 2024-11-20T22:24:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742122_1298 (size=12151) 2024-11-20T22:24:51,970 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/57e4a0d1bf854806924b9d246355b2ab 2024-11-20T22:24:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/db0a6429d9da43639dace47c16a763dd as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/db0a6429d9da43639dace47c16a763dd 2024-11-20T22:24:51,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141551977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,979 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/db0a6429d9da43639dace47c16a763dd, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:24:51,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/df2ebd9e25fd43f5b3577eece3f0237f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/df2ebd9e25fd43f5b3577eece3f0237f 2024-11-20T22:24:51,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141551978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141551982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:51,984 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/df2ebd9e25fd43f5b3577eece3f0237f, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:24:51,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/57e4a0d1bf854806924b9d246355b2ab as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/57e4a0d1bf854806924b9d246355b2ab 2024-11-20T22:24:51,989 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/57e4a0d1bf854806924b9d246355b2ab, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:24:51,990 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5f1e2878eea2034576ba469d1952fe84 in 868ms, sequenceid=173, compaction requested=true 2024-11-20T22:24:51,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:51,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
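The RegionTooBusyException records in this excerpt all originate in HRegion.checkResources: once the region's memstore exceeds its blocking limit, incoming mutations are rejected with "Over memstore limit=512.0 K" and the client-side RpcRetryingCallerImpl backs off and retries until the in-flight flush drains the memstore. The blocking limit is the configured memstore flush size times the block multiplier. Below is a minimal, illustrative writer against the TestAcidGuarantees table that would exercise this path; the property values are assumptions chosen only to match the shape of this log (128 KB x 4 = 512 K), not values read from this run, and the region-server properties would normally live in hbase-site.xml or the test's mini-cluster configuration rather than on a client Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Server-side sizing (normally set in hbase-site.xml or the mini-cluster
    // config; shown here only to make the arithmetic explicit): a 128 KB flush
    // size with the default block multiplier of 4 gives the 512 K blocking
    // limit reported by the RegionTooBusyException entries in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // Client retry budget; RpcRetryingCallerImpl logs "tries=.., retries=16"
    // while it backs off on RegionTooBusyException and tries again.
    conf.setInt("hbase.client.retries.number", 16);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = new byte[32]; // arbitrary payload
      for (int i = 0; i < 1_000; i++) {
        Put put = new Put(Bytes.toBytes("test_row_" + (i % 10)));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
        put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
        put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
        // put() blocks and retries internally when the region server answers
        // with RegionTooBusyException; it only throws once the retry budget
        // (and the operation timeout) is exhausted.
        table.put(put);
      }
    }
  }
}

Once the flush recorded around this point completes (Finished flush of dataSize ... compaction requested=true), the memstore drops back under the limit, the blocked mutations succeed on retry, and the flusher marks the A/B/C stores for compaction, which is what the CompactSplit records at the end of this excerpt show.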
2024-11-20T22:24:51,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-20T22:24:51,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-20T22:24:51,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T22:24:51,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2690 sec 2024-11-20T22:24:51,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.2740 sec 2024-11-20T22:24:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:52,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:24:52,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:52,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:52,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:52,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:52,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:52,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:52,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/45c54b122cb2444bb213570e04eaaf06 is 50, key is test_row_0/A:col10/1732141491367/Put/seqid=0 2024-11-20T22:24:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742123_1299 (size=12151) 2024-11-20T22:24:52,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141552276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141552380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141552484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141552487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141552495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141552583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/45c54b122cb2444bb213570e04eaaf06 2024-11-20T22:24:52,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141552653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:52,657 DEBUG [Thread-1198 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:52,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/f3bd1361b49944c999d23089ba926bbc is 50, key is test_row_0/B:col10/1732141491367/Put/seqid=0 2024-11-20T22:24:52,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742124_1300 (size=12151) 2024-11-20T22:24:52,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 
KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/f3bd1361b49944c999d23089ba926bbc 2024-11-20T22:24:52,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/ad35f59d091f4721b1989a82cf9f8afe is 50, key is test_row_0/C:col10/1732141491367/Put/seqid=0 2024-11-20T22:24:52,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742125_1301 (size=12151) 2024-11-20T22:24:52,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:52,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141552887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/ad35f59d091f4721b1989a82cf9f8afe 2024-11-20T22:24:53,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/45c54b122cb2444bb213570e04eaaf06 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/45c54b122cb2444bb213570e04eaaf06 2024-11-20T22:24:53,129 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/45c54b122cb2444bb213570e04eaaf06, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T22:24:53,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/f3bd1361b49944c999d23089ba926bbc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/f3bd1361b49944c999d23089ba926bbc 2024-11-20T22:24:53,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/f3bd1361b49944c999d23089ba926bbc, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T22:24:53,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/ad35f59d091f4721b1989a82cf9f8afe as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ad35f59d091f4721b1989a82cf9f8afe 2024-11-20T22:24:53,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ad35f59d091f4721b1989a82cf9f8afe, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T22:24:53,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5f1e2878eea2034576ba469d1952fe84 in 907ms, sequenceid=195, compaction requested=true 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:53,144 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-20T22:24:53,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:53,144 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:53,150 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:53,150 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:24:53,150 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:53,150 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/471fd453e75b44869d965d1f5cb40d5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6a0d4d205e49499c83be324d244f32dd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/db0a6429d9da43639dace47c16a763dd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/45c54b122cb2444bb213570e04eaaf06] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=47.7 K 2024-11-20T22:24:53,150 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:53,150 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:24:53,150 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
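The compaction entries above show each store (A, B and C) reaching four flushed HFiles, at which point SortedCompactionPolicy/ExploringCompactionPolicy selects all four eligible files for a minor compaction ("Selecting compaction from 4 store files, 0 compacting, 4 eligible"). The thresholds behind that selection are ordinary HBase configuration; the sketch below is illustrative only, and the values are assumptions, not the ones this test run used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: standard HBase compaction-selection knobs with illustrative values
// (not read from this run).
public class CompactionSelectionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is scheduled.
        conf.setInt("hbase.hstore.compaction.min", 4);
        // Maximum number of store files a single minor compaction will include.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Exploring-policy ratio: roughly, a file joins the selection only if it is not
        // much larger than the combined size of the other candidates.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1)
            + " compaction.max=" + conf.getInt("hbase.hstore.compaction.max", -1));
    }
}

With four ~12 K files per store and thresholds in this range, each flush cycle ends in an all-file minor compaction, which matches the selections logged above.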
2024-11-20T22:24:53,150 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/7cd120569a5d4b5b9c8f2dc43fba8a06, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/68bdb1127e4a43af9c04887908bbf0f3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/df2ebd9e25fd43f5b3577eece3f0237f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/f3bd1361b49944c999d23089ba926bbc] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=47.7 K 2024-11-20T22:24:53,151 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cd120569a5d4b5b9c8f2dc43fba8a06, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141489095 2024-11-20T22:24:53,151 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 471fd453e75b44869d965d1f5cb40d5a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141489095 2024-11-20T22:24:53,153 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a0d4d205e49499c83be324d244f32dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732141489499 2024-11-20T22:24:53,153 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68bdb1127e4a43af9c04887908bbf0f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732141489499 2024-11-20T22:24:53,155 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting db0a6429d9da43639dace47c16a763dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141490198 2024-11-20T22:24:53,155 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting df2ebd9e25fd43f5b3577eece3f0237f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141490198 2024-11-20T22:24:53,156 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 45c54b122cb2444bb213570e04eaaf06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141491367 2024-11-20T22:24:53,156 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3bd1361b49944c999d23089ba926bbc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141491367 2024-11-20T22:24:53,184 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:53,184 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/369c659bb53f4ebab3ad2d28af9b038c is 50, key is test_row_0/B:col10/1732141491367/Put/seqid=0 2024-11-20T22:24:53,189 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#247 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:53,189 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/35a0d6823e4c4ce6812e93e2cb766afc is 50, key is test_row_0/A:col10/1732141491367/Put/seqid=0 2024-11-20T22:24:53,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742126_1302 (size=12595) 2024-11-20T22:24:53,239 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/369c659bb53f4ebab3ad2d28af9b038c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/369c659bb53f4ebab3ad2d28af9b038c 2024-11-20T22:24:53,246 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into 369c659bb53f4ebab3ad2d28af9b038c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
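The "key is test_row_0/A:col10/..." and ".../B:col10/..." entries indicate the write pattern feeding these flushes and compactions: single-qualifier cells written into column families A, B and C of rows named test_row_N. A minimal client-side sketch of such a writer follows; the connection setup, row and value are assumptions for illustration, not the actual TestAcidGuarantees writer.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of the write pattern suggested by the log: one row, one qualifier,
// all three families.
public class TestRowWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            for (String family : new String[] {"A", "B", "C"}) {
                // Qualifier col10 mirrors the cell names visible in the HFileWriterImpl lines.
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                    Bytes.toBytes("some-value"));
            }
            table.put(put); // each put sits in the memstore until the next flush
        }
    }
}

Each flush of those memstores produces the ~12 K HFiles that the compactions above then merge back into a single file per store.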
2024-11-20T22:24:53,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:53,246 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=12, startTime=1732141493144; duration=0sec 2024-11-20T22:24:53,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:53,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:24:53,246 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:53,250 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:53,250 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:24:53,250 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:53,250 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e1696c1cf7094319a3f80bba80ec6ee3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fec79bd8cea2410ba725ebd0d77e74e8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/57e4a0d1bf854806924b9d246355b2ab, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ad35f59d091f4721b1989a82cf9f8afe] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=47.7 K 2024-11-20T22:24:53,250 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1696c1cf7094319a3f80bba80ec6ee3, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732141489095 2024-11-20T22:24:53,251 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fec79bd8cea2410ba725ebd0d77e74e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732141489499 2024-11-20T22:24:53,251 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57e4a0d1bf854806924b9d246355b2ab, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141490198 2024-11-20T22:24:53,251 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad35f59d091f4721b1989a82cf9f8afe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141491367 2024-11-20T22:24:53,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742127_1303 (size=12595) 2024-11-20T22:24:53,277 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#248 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:53,277 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e8f59f256fa84ec094ce79f9e3799219 is 50, key is test_row_0/C:col10/1732141491367/Put/seqid=0 2024-11-20T22:24:53,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742128_1304 (size=12595) 2024-11-20T22:24:53,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:53,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:53,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:53,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:53,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:53,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:53,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:53,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/82c8a64ef119446ba4eafe1946bc7ea8 is 50, key is test_row_0/A:col10/1732141492275/Put/seqid=0 2024-11-20T22:24:53,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742129_1305 (size=12151) 2024-11-20T22:24:53,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/82c8a64ef119446ba4eafe1946bc7ea8 2024-11-20T22:24:53,479 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/4a092db6241842058f9a0a07579a29e3 is 50, key is test_row_0/B:col10/1732141492275/Put/seqid=0 2024-11-20T22:24:53,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141553499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141553498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141553504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141553504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742130_1306 (size=12151) 2024-11-20T22:24:53,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141553605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141553609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141553610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,674 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/35a0d6823e4c4ce6812e93e2cb766afc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/35a0d6823e4c4ce6812e93e2cb766afc 2024-11-20T22:24:53,681 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into 35a0d6823e4c4ce6812e93e2cb766afc(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:53,681 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:53,681 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=12, startTime=1732141493144; duration=0sec 2024-11-20T22:24:53,682 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:53,682 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:24:53,743 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e8f59f256fa84ec094ce79f9e3799219 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e8f59f256fa84ec094ce79f9e3799219 2024-11-20T22:24:53,754 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into e8f59f256fa84ec094ce79f9e3799219(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:53,754 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:53,754 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=12, startTime=1732141493144; duration=0sec 2024-11-20T22:24:53,754 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:53,754 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:24:53,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141553810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141553816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:53,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141553816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T22:24:53,836 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-20T22:24:53,838 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:53,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-20T22:24:53,840 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:53,841 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:53,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:53,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:53,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/4a092db6241842058f9a0a07579a29e3 2024-11-20T22:24:53,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=71 2024-11-20T22:24:53,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/5229f598ef33472882a2f00678adc7ab is 50, key is test_row_0/C:col10/1732141492275/Put/seqid=0 2024-11-20T22:24:53,993 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:53,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:53,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:53,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:53,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:53,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:53,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
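The pid=71/72 entries trace a client-requested flush: the master stores a FlushTableProcedure, dispatches a FlushRegionCallable to the region server, and the callable fails with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still writing out the previous snapshot; the procedure framework then re-dispatches it. From the client side the whole exchange is a single Admin call, sketched below with assumed connection details.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: requesting a table flush the way the "Client=jenkins ... flush
// TestAcidGuarantees" line above does. Cluster connection details are assumed.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Waits for the master-side flush procedure for the table to complete.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}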
2024-11-20T22:24:54,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742131_1307 (size=12151) 2024-11-20T22:24:54,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141554115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141554120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141554120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:54,148 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:54,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,301 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:54,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:54,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:54,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:54,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:54,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/5229f598ef33472882a2f00678adc7ab 2024-11-20T22:24:54,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/82c8a64ef119446ba4eafe1946bc7ea8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/82c8a64ef119446ba4eafe1946bc7ea8 2024-11-20T22:24:54,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:54,454 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:54,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:54,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:54,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:54,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:54,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:54,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/82c8a64ef119446ba4eafe1946bc7ea8, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:24:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/4a092db6241842058f9a0a07579a29e3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/4a092db6241842058f9a0a07579a29e3 2024-11-20T22:24:54,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/4a092db6241842058f9a0a07579a29e3, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:24:54,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/5229f598ef33472882a2f00678adc7ab as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5229f598ef33472882a2f00678adc7ab 2024-11-20T22:24:54,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5229f598ef33472882a2f00678adc7ab, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:24:54,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5f1e2878eea2034576ba469d1952fe84 in 1142ms, sequenceid=211, compaction requested=false 2024-11-20T22:24:54,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:54,608 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T22:24:54,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:54,608 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:24:54,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:54,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:54,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:54,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:54,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:54,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:54,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/7dfca359c7334320b8e42dcc67fa0caa is 50, key is test_row_0/A:col10/1732141493495/Put/seqid=0 2024-11-20T22:24:54,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:54,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:54,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742132_1308 (size=12151) 2024-11-20T22:24:54,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141554665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141554666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,679 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/7dfca359c7334320b8e42dcc67fa0caa 2024-11-20T22:24:54,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141554665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/9749af848e66429ebfc6a99cb0ed32d2 is 50, key is test_row_0/B:col10/1732141493495/Put/seqid=0 2024-11-20T22:24:54,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742133_1309 (size=12151) 2024-11-20T22:24:54,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141554774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141554774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141554783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:54,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141554977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141554978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:54,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:54,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141554992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,152 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/9749af848e66429ebfc6a99cb0ed32d2 2024-11-20T22:24:55,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e4e4ee19b95149c7a72f91c2f5af647e is 50, key is test_row_0/C:col10/1732141493495/Put/seqid=0 2024-11-20T22:24:55,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742134_1310 (size=12151) 2024-11-20T22:24:55,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141555290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141555291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141555299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141555505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,509 DEBUG [Thread-1200 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:55,634 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e4e4ee19b95149c7a72f91c2f5af647e 2024-11-20T22:24:55,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/7dfca359c7334320b8e42dcc67fa0caa as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/7dfca359c7334320b8e42dcc67fa0caa 2024-11-20T22:24:55,656 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/7dfca359c7334320b8e42dcc67fa0caa, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T22:24:55,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/9749af848e66429ebfc6a99cb0ed32d2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/9749af848e66429ebfc6a99cb0ed32d2 2024-11-20T22:24:55,664 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/9749af848e66429ebfc6a99cb0ed32d2, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T22:24:55,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e4e4ee19b95149c7a72f91c2f5af647e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e4e4ee19b95149c7a72f91c2f5af647e 2024-11-20T22:24:55,679 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e4e4ee19b95149c7a72f91c2f5af647e, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T22:24:55,681 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5f1e2878eea2034576ba469d1952fe84 in 1073ms, sequenceid=234, compaction requested=true 2024-11-20T22:24:55,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:55,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
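Annotation: the repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which blocks puts once the region's memstore exceeds the flush size multiplied by the block multiplier. A minimal sketch of the two standard configuration keys involved follows; the 128 K flush size is an assumption chosen only because 128 K x 4 reproduces the 512 K limit reported in this run, the actual values used by the test are not shown in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test values: blocking limit = flush.size * block.multiplier = 128 K * 4 = 512 K,
    // matching the "Over memstore limit=512.0 K" messages above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit (bytes): " + blockingLimit);
  }
}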
2024-11-20T22:24:55,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T22:24:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T22:24:55,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T22:24:55,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8420 sec 2024-11-20T22:24:55,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.8580 sec 2024-11-20T22:24:55,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:24:55,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:55,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:55,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:55,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:55,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:55,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:55,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:55,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/1dd72a35427b4dcea4f9e3d9b9742e0a is 50, key is test_row_0/A:col10/1732141494663/Put/seqid=0 2024-11-20T22:24:55,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742135_1311 (size=14541) 2024-11-20T22:24:55,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/1dd72a35427b4dcea4f9e3d9b9742e0a 2024-11-20T22:24:55,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/646d2fe5c49b411d809e70091247259a is 50, key is test_row_0/B:col10/1732141494663/Put/seqid=0 2024-11-20T22:24:55,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742136_1312 
(size=12151) 2024-11-20T22:24:55,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/646d2fe5c49b411d809e70091247259a 2024-11-20T22:24:55,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141555884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141555889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:55,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141555890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:55,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/18278b0fc0ed46e3982861aa6c266555 is 50, key is test_row_0/C:col10/1732141494663/Put/seqid=0 2024-11-20T22:24:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T22:24:55,950 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T22:24:55,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742137_1313 (size=12151) 2024-11-20T22:24:55,953 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T22:24:55,954 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:55,955 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:55,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:56,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141555997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141555998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141555998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:56,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T22:24:56,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:56,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:56,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:56,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
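Annotation: the FlushRegionProcedure dispatched as pid=74 lands on a region that is already flushing, so FlushRegionCallable fails with "Unable to complete flush" and the master re-dispatches it (the same pid reappears below). From the client side, these flushes are Admin.flush() calls waited on through HBaseAdmin's TableFuture, as the "Operation: FLUSH ... procId: 71 completed" line above shows. A minimal sketch, assuming hbase-site.xml for this cluster is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master, which fans out per-region
      // FlushRegionProcedures like pid=72 and pid=74 in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}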
2024-11-20T22:24:56,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141556204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141556210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141556210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:56,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T22:24:56,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:56,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:56,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:56,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
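Annotation: the writers that keep hitting RegionTooBusyException are retried on the client by RpcRetryingCallerImpl (earlier in this run it reports tries=6 of retries=16 for HTable.put in AcidGuaranteesTestTool$AtomicityWriter). A sketch of such a put with the standard retry settings; the property names are standard client keys, the values and cell contents are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriedPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);  // matches retries=16 in the log
    conf.setLong("hbase.client.pause", 100);         // base pause (ms) between retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is retried internally; it only reaches the caller
      // once the retry budget is exhausted.
      table.put(put);
    }
  }
}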
2024-11-20T22:24:56,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:56,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/18278b0fc0ed46e3982861aa6c266555 2024-11-20T22:24:56,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/1dd72a35427b4dcea4f9e3d9b9742e0a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/1dd72a35427b4dcea4f9e3d9b9742e0a 2024-11-20T22:24:56,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/1dd72a35427b4dcea4f9e3d9b9742e0a, entries=200, sequenceid=250, filesize=14.2 K 2024-11-20T22:24:56,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/646d2fe5c49b411d809e70091247259a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/646d2fe5c49b411d809e70091247259a 2024-11-20T22:24:56,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/646d2fe5c49b411d809e70091247259a, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T22:24:56,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/18278b0fc0ed46e3982861aa6c266555 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/18278b0fc0ed46e3982861aa6c266555 2024-11-20T22:24:56,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/18278b0fc0ed46e3982861aa6c266555, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T22:24:56,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 5f1e2878eea2034576ba469d1952fe84 in 593ms, sequenceid=250, compaction requested=true 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:56,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:24:56,397 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:56,397 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:56,398 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:56,398 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:24:56,399 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
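Annotation: the compaction requests above are driven by store-file counts. The flush leaves each of A, B and C with 4 files, SortedCompactionPolicy reports "4 eligible, 16 blocking", and ExploringCompactionPolicy selects all 4. A sketch of the two standard keys behind those numbers; the values shown are the usual defaults and appear consistent with this run, though the test's actual configuration is not visible in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum store files before a minor compaction is considered ("4 eligible" clears this),
    // and the file count at which further flushes to the store are blocked ("16 blocking").
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("minor compaction min files: "
        + conf.getInt("hbase.hstore.compactionThreshold", 3));
  }
}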
2024-11-20T22:24:56,399 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/369c659bb53f4ebab3ad2d28af9b038c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/4a092db6241842058f9a0a07579a29e3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/9749af848e66429ebfc6a99cb0ed32d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/646d2fe5c49b411d809e70091247259a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=47.9 K 2024-11-20T22:24:56,399 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51438 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:56,399 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:24:56,399 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:56,399 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/35a0d6823e4c4ce6812e93e2cb766afc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/82c8a64ef119446ba4eafe1946bc7ea8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/7dfca359c7334320b8e42dcc67fa0caa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/1dd72a35427b4dcea4f9e3d9b9742e0a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=50.2 K 2024-11-20T22:24:56,399 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 369c659bb53f4ebab3ad2d28af9b038c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141491367 2024-11-20T22:24:56,399 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 35a0d6823e4c4ce6812e93e2cb766afc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141491367 2024-11-20T22:24:56,400 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a092db6241842058f9a0a07579a29e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, 
earliestPutTs=1732141492271 2024-11-20T22:24:56,400 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 82c8a64ef119446ba4eafe1946bc7ea8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141492271 2024-11-20T22:24:56,400 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9749af848e66429ebfc6a99cb0ed32d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732141493495 2024-11-20T22:24:56,401 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dfca359c7334320b8e42dcc67fa0caa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732141493495 2024-11-20T22:24:56,401 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 646d2fe5c49b411d809e70091247259a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141494663 2024-11-20T22:24:56,401 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dd72a35427b4dcea4f9e3d9b9742e0a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141494663 2024-11-20T22:24:56,417 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:56,418 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/bcf50c3453704785b0e18985f60ea35d is 50, key is test_row_0/B:col10/1732141494663/Put/seqid=0 2024-11-20T22:24:56,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T22:24:56,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:56,423 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:24:56,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:56,429 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#259 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:56,430 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/b172edc6c8ac4f2c9724f1ebf2cb52c5 is 50, key is test_row_0/A:col10/1732141494663/Put/seqid=0 2024-11-20T22:24:56,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/8dc649759cf64188b1adf47127b84fb4 is 50, key is test_row_0/A:col10/1732141495873/Put/seqid=0 2024-11-20T22:24:56,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742138_1314 (size=12731) 2024-11-20T22:24:56,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:56,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
as already flushing 2024-11-20T22:24:56,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742139_1315 (size=12731) 2024-11-20T22:24:56,527 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/b172edc6c8ac4f2c9724f1ebf2cb52c5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b172edc6c8ac4f2c9724f1ebf2cb52c5 2024-11-20T22:24:56,534 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into b172edc6c8ac4f2c9724f1ebf2cb52c5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:56,534 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:56,534 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=12, startTime=1732141496396; duration=0sec 2024-11-20T22:24:56,534 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:56,534 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:24:56,534 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:24:56,538 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:24:56,539 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:24:56,539 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:56,539 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e8f59f256fa84ec094ce79f9e3799219, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5229f598ef33472882a2f00678adc7ab, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e4e4ee19b95149c7a72f91c2f5af647e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/18278b0fc0ed46e3982861aa6c266555] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=47.9 K 2024-11-20T22:24:56,539 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e8f59f256fa84ec094ce79f9e3799219, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732141491367 2024-11-20T22:24:56,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742140_1316 (size=12301) 2024-11-20T22:24:56,540 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5229f598ef33472882a2f00678adc7ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141492271 2024-11-20T22:24:56,541 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/8dc649759cf64188b1adf47127b84fb4 2024-11-20T22:24:56,542 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e4e4ee19b95149c7a72f91c2f5af647e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732141493495 2024-11-20T22:24:56,544 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 18278b0fc0ed46e3982861aa6c266555, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141494663 2024-11-20T22:24:56,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:56,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0d588eed3189440999d01ee2a942dd60 is 50, key is test_row_0/B:col10/1732141495873/Put/seqid=0 2024-11-20T22:24:56,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141556558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141556559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141556566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,589 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#262 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:56,590 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/1fd9fc65b5d54291a15610089d31b4f4 is 50, key is test_row_0/C:col10/1732141494663/Put/seqid=0 2024-11-20T22:24:56,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742141_1317 (size=12301) 2024-11-20T22:24:56,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742142_1318 (size=12731) 2024-11-20T22:24:56,658 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/1fd9fc65b5d54291a15610089d31b4f4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/1fd9fc65b5d54291a15610089d31b4f4 2024-11-20T22:24:56,669 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into 1fd9fc65b5d54291a15610089d31b4f4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:56,670 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:56,670 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=12, startTime=1732141496396; duration=0sec 2024-11-20T22:24:56,670 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:56,670 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:24:56,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141556667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141556667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141556673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49262 deadline: 1732141556689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,693 DEBUG [Thread-1198 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8198 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:24:56,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141556874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141556874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:56,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141556880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:56,892 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/bcf50c3453704785b0e18985f60ea35d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/bcf50c3453704785b0e18985f60ea35d 2024-11-20T22:24:56,898 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into bcf50c3453704785b0e18985f60ea35d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:56,898 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:56,898 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=12, startTime=1732141496396; duration=0sec 2024-11-20T22:24:56,898 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:56,898 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:24:57,043 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0d588eed3189440999d01ee2a942dd60 2024-11-20T22:24:57,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:57,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/6c6b07b181654c3a98363a3e49138823 is 50, key is test_row_0/C:col10/1732141495873/Put/seqid=0 2024-11-20T22:24:57,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742143_1319 (size=12301) 2024-11-20T22:24:57,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141557181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141557181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141557189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,520 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/6c6b07b181654c3a98363a3e49138823 2024-11-20T22:24:57,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/8dc649759cf64188b1adf47127b84fb4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8dc649759cf64188b1adf47127b84fb4 2024-11-20T22:24:57,528 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8dc649759cf64188b1adf47127b84fb4, entries=150, sequenceid=270, filesize=12.0 K 2024-11-20T22:24:57,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0d588eed3189440999d01ee2a942dd60 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d588eed3189440999d01ee2a942dd60 2024-11-20T22:24:57,547 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d588eed3189440999d01ee2a942dd60, entries=150, sequenceid=270, filesize=12.0 K 2024-11-20T22:24:57,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/6c6b07b181654c3a98363a3e49138823 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/6c6b07b181654c3a98363a3e49138823 2024-11-20T22:24:57,563 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/6c6b07b181654c3a98363a3e49138823, entries=150, sequenceid=270, filesize=12.0 K 2024-11-20T22:24:57,565 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 5f1e2878eea2034576ba469d1952fe84 in 1142ms, sequenceid=270, compaction requested=false 2024-11-20T22:24:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T22:24:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T22:24:57,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T22:24:57,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6120 sec 2024-11-20T22:24:57,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.6220 sec 2024-11-20T22:24:57,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:57,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T22:24:57,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:57,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:57,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:57,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:57,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:57,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:57,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/d026d8ae45904d0e87079235f5da2505 is 50, key is test_row_0/A:col10/1732141496558/Put/seqid=0 2024-11-20T22:24:57,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141557719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141557720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141557723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742144_1320 (size=12301) 2024-11-20T22:24:57,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141557824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141557824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:57,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:57,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141557828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141558029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141558029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141558034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T22:24:58,062 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T22:24:58,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:24:58,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T22:24:58,067 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:24:58,068 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:24:58,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:24:58,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:58,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/d026d8ae45904d0e87079235f5da2505 2024-11-20T22:24:58,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/ff6dfda31dc342a39fd2ce1b37b75010 is 50, key is test_row_0/B:col10/1732141496558/Put/seqid=0 2024-11-20T22:24:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:58,194 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742145_1321 (size=12301) 2024-11-20T22:24:58,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/ff6dfda31dc342a39fd2ce1b37b75010 2024-11-20T22:24:58,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/c789e6695f474a239d3922d575aa35f9 is 50, key is test_row_0/C:col10/1732141496558/Put/seqid=0 2024-11-20T22:24:58,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:58,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:58,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:24:58,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742146_1322 (size=12301) 2024-11-20T22:24:58,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/c789e6695f474a239d3922d575aa35f9 2024-11-20T22:24:58,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/d026d8ae45904d0e87079235f5da2505 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d026d8ae45904d0e87079235f5da2505 2024-11-20T22:24:58,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d026d8ae45904d0e87079235f5da2505, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T22:24:58,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/ff6dfda31dc342a39fd2ce1b37b75010 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ff6dfda31dc342a39fd2ce1b37b75010 2024-11-20T22:24:58,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ff6dfda31dc342a39fd2ce1b37b75010, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T22:24:58,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/c789e6695f474a239d3922d575aa35f9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/c789e6695f474a239d3922d575aa35f9 2024-11-20T22:24:58,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/c789e6695f474a239d3922d575aa35f9, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T22:24:58,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5f1e2878eea2034576ba469d1952fe84 in 620ms, sequenceid=291, compaction requested=true 2024-11-20T22:24:58,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal 
for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:58,309 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:58,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:24:58,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:58,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:58,310 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:58,310 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:24:58,310 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,310 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b172edc6c8ac4f2c9724f1ebf2cb52c5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8dc649759cf64188b1adf47127b84fb4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d026d8ae45904d0e87079235f5da2505] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.5 K 2024-11-20T22:24:58,311 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:58,311 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b172edc6c8ac4f2c9724f1ebf2cb52c5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141494663 2024-11-20T22:24:58,311 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:24:58,311 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:24:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:24:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:58,311 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/bcf50c3453704785b0e18985f60ea35d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d588eed3189440999d01ee2a942dd60, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ff6dfda31dc342a39fd2ce1b37b75010] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.5 K 2024-11-20T22:24:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:24:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:58,311 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dc649759cf64188b1adf47127b84fb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732141495873 2024-11-20T22:24:58,311 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bcf50c3453704785b0e18985f60ea35d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141494663 2024-11-20T22:24:58,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d588eed3189440999d01ee2a942dd60, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732141495873 2024-11-20T22:24:58,312 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d026d8ae45904d0e87079235f5da2505, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732141496558 2024-11-20T22:24:58,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ff6dfda31dc342a39fd2ce1b37b75010, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732141496558 2024-11-20T22:24:58,336 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:58,339 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/c0da6d0173b3438d9bf64cc084dec51f is 50, key is test_row_0/A:col10/1732141496558/Put/seqid=0 2024-11-20T22:24:58,343 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:58,343 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/5a4579ddafaa48949ae31aae800d891f is 50, key is test_row_0/B:col10/1732141496558/Put/seqid=0 2024-11-20T22:24:58,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:58,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T22:24:58,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:58,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:58,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:58,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:58,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:58,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:58,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:58,373 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:58,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
as already flushing 2024-11-20T22:24:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742148_1324 (size=12983) 2024-11-20T22:24:58,418 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/5a4579ddafaa48949ae31aae800d891f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/5a4579ddafaa48949ae31aae800d891f 2024-11-20T22:24:58,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742147_1323 (size=12983) 2024-11-20T22:24:58,423 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into 5a4579ddafaa48949ae31aae800d891f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:58,423 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:58,424 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141498310; duration=0sec 2024-11-20T22:24:58,424 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:24:58,424 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:24:58,424 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:24:58,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:24:58,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:24:58,425 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,425 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/1fd9fc65b5d54291a15610089d31b4f4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/6c6b07b181654c3a98363a3e49138823, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/c789e6695f474a239d3922d575aa35f9] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.5 K 2024-11-20T22:24:58,426 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fd9fc65b5d54291a15610089d31b4f4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732141494663 2024-11-20T22:24:58,427 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c6b07b181654c3a98363a3e49138823, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732141495873 2024-11-20T22:24:58,428 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c789e6695f474a239d3922d575aa35f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732141496558 2024-11-20T22:24:58,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/6f57bcb47f73487f8399bc3409b7d2e2 is 50, key is test_row_0/A:col10/1732141497714/Put/seqid=0 2024-11-20T22:24:58,434 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/c0da6d0173b3438d9bf64cc084dec51f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/c0da6d0173b3438d9bf64cc084dec51f 2024-11-20T22:24:58,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141558419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141558428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141558435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,442 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into c0da6d0173b3438d9bf64cc084dec51f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:24:58,442 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:58,442 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141498309; duration=0sec 2024-11-20T22:24:58,442 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:58,442 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:24:58,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742149_1325 (size=14741) 2024-11-20T22:24:58,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/6f57bcb47f73487f8399bc3409b7d2e2 2024-11-20T22:24:58,486 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:24:58,488 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/bb77dc10847141dc8855f97324eaa3b8 is 50, key is test_row_0/C:col10/1732141496558/Put/seqid=0 2024-11-20T22:24:58,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/a62854ebebc140f4b20445db64681bb3 is 50, key is test_row_0/B:col10/1732141497714/Put/seqid=0 2024-11-20T22:24:58,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
as already flushing 2024-11-20T22:24:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141558538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141558542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141558542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742150_1326 (size=12983) 2024-11-20T22:24:58,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742151_1327 (size=12301) 2024-11-20T22:24:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:58,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:58,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:58,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141558742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141558747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:58,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141558747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,843 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:58,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:58,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:58,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:58,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:58,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/a62854ebebc140f4b20445db64681bb3 2024-11-20T22:24:59,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,006 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/bb77dc10847141dc8855f97324eaa3b8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/bb77dc10847141dc8855f97324eaa3b8 2024-11-20T22:24:59,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:59,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:59,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:24:59,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:59,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:59,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:24:59,020 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into bb77dc10847141dc8855f97324eaa3b8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:24:59,020 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:59,020 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=13, startTime=1732141498311; duration=0sec 2024-11-20T22:24:59,020 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:24:59,021 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:24:59,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/39e42f113b9242199a15b08ef6f84505 is 50, key is test_row_0/C:col10/1732141497714/Put/seqid=0 2024-11-20T22:24:59,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141559054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141559071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141559071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742152_1328 (size=12301) 2024-11-20T22:24:59,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/39e42f113b9242199a15b08ef6f84505 2024-11-20T22:24:59,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/6f57bcb47f73487f8399bc3409b7d2e2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6f57bcb47f73487f8399bc3409b7d2e2 2024-11-20T22:24:59,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6f57bcb47f73487f8399bc3409b7d2e2, entries=200, sequenceid=312, filesize=14.4 K 2024-11-20T22:24:59,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/a62854ebebc140f4b20445db64681bb3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a62854ebebc140f4b20445db64681bb3 2024-11-20T22:24:59,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a62854ebebc140f4b20445db64681bb3, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T22:24:59,134 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/39e42f113b9242199a15b08ef6f84505 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/39e42f113b9242199a15b08ef6f84505 2024-11-20T22:24:59,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/39e42f113b9242199a15b08ef6f84505, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T22:24:59,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5f1e2878eea2034576ba469d1952fe84 in 778ms, sequenceid=312, compaction requested=false 2024-11-20T22:24:59,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:59,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T22:24:59,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
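The flush sequence above writes each store's data into a file under the region's .tmp directory and then commits it by renaming it into the column-family directory (the "Committing .../.tmp/A/... as .../A/..." lines). The following is a minimal, hypothetical sketch of that write-then-rename pattern using the Hadoop FileSystem API; it is not HBase's HRegionFileSystem code, and the class name and paths are made up for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  /**
   * Move a flushed file from the region's .tmp area into its column-family directory.
   * The rename is the "commit": readers of the family directory never see a half-written file.
   */
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical paths mirroring the layout in the log:
    // .../<region>/.tmp/A/<hfile>  ->  .../<region>/A/<hfile>
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/data/default/TestAcidGuarantees/exampleRegion/.tmp/A/exampleHFile");
    Path familyDir = new Path("/data/default/TestAcidGuarantees/exampleRegion/A");
    System.out.println("Committed to " + commitStoreFile(fs, tmp, familyDir));
  }
}

On HDFS a same-filesystem rename is an atomic NameNode metadata operation, which is why committing by rename keeps the set of visible store files consistent while flushes and compactions run concurrently.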
2024-11-20T22:24:59,171 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:24:59,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:59,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:59,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:59,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:59,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:59,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:59,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T22:24:59,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/cc4756ef8552481b8de477e61dad3dd0 is 50, key is test_row_0/A:col10/1732141498428/Put/seqid=0 2024-11-20T22:24:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742153_1329 (size=12301) 2024-11-20T22:24:59,237 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/cc4756ef8552481b8de477e61dad3dd0 2024-11-20T22:24:59,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/40904a6239e24ad9908c2c0e2f1fdadc is 50, key is test_row_0/B:col10/1732141498428/Put/seqid=0 2024-11-20T22:24:59,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742154_1330 (size=12301) 2024-11-20T22:24:59,310 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=330 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/40904a6239e24ad9908c2c0e2f1fdadc 2024-11-20T22:24:59,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2ed08234110a4b308f1d5e663deba249 is 50, key is test_row_0/C:col10/1732141498428/Put/seqid=0 2024-11-20T22:24:59,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742155_1331 (size=12301) 2024-11-20T22:24:59,380 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2ed08234110a4b308f1d5e663deba249 2024-11-20T22:24:59,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/cc4756ef8552481b8de477e61dad3dd0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/cc4756ef8552481b8de477e61dad3dd0 2024-11-20T22:24:59,401 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/cc4756ef8552481b8de477e61dad3dd0, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:24:59,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/40904a6239e24ad9908c2c0e2f1fdadc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/40904a6239e24ad9908c2c0e2f1fdadc 2024-11-20T22:24:59,415 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/40904a6239e24ad9908c2c0e2f1fdadc, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:24:59,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2ed08234110a4b308f1d5e663deba249 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2ed08234110a4b308f1d5e663deba249 2024-11-20T22:24:59,422 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2ed08234110a4b308f1d5e663deba249, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:24:59,426 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 5f1e2878eea2034576ba469d1952fe84 in 255ms, sequenceid=330, compaction requested=true 2024-11-20T22:24:59,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:24:59,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:24:59,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T22:24:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T22:24:59,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T22:24:59,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3590 sec 2024-11-20T22:24:59,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.3660 sec 2024-11-20T22:24:59,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:24:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:24:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:24:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:24:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:24:59,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:24:59,616 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/26b11084430d4238830cbf78116dcc25 is 50, key is test_row_0/A:col10/1732141499567/Put/seqid=0 2024-11-20T22:24:59,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742156_1332 (size=14741) 2024-11-20T22:24:59,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141559707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141559720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141559721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141559727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141559827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141559831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141559831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:24:59,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:24:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141559837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141560031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141560037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141560044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:25:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141560046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048
2024-11-20T22:25:00,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/26b11084430d4238830cbf78116dcc25
2024-11-20T22:25:00,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/3eca7fcbabad4f79bd1c95c1e81bd208 is 50, key is test_row_0/B:col10/1732141499567/Put/seqid=0
2024-11-20T22:25:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742157_1333 (size=12301)
2024-11-20T22:25:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-11-20T22:25:00,180 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed
2024-11-20T22:25:00,183 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:25:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees
2024-11-20T22:25:00,184 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:25:00,184 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:25:00,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:25:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
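The "Over memstore limit=512.0 K" warnings above show the region server rejecting Mutate calls while region 5f1e2878eea2034576ba469d1952fe84 flushes its memstores, with each rejection echoed to the caller in the matching ipc.CallRunner DEBUG line. The sketch below is an illustrative, minimal client-side retry loop for that busy signal and is not code from the test run: the table, row, family and qualifier names are taken from the log, while the class name, retry count and backoff values are assumptions; depending on client retry settings the condition may surface directly as RegionTooBusyException or wrapped in a RetriesExhaustedException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of write as in the log: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L; // assumed starting pause; tune for the workload
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // rejected while the region is over its memstore limit
          break;          // write accepted
        } catch (RegionTooBusyException | RetriesExhaustedException busy) {
          // Backpressure: the region is flushing; wait, then retry with a longer pause.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}

In the run above the writers appear to keep retrying through the normal client machinery, which is consistent with the steadily increasing callId values on the same connections once the flush completes.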
2024-11-20T22:25:00,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
2024-11-20T22:25:00,336 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048
2024-11-20T22:25:00,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78
2024-11-20T22:25:00,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.
2024-11-20T22:25:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing
2024-11-20T22:25:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.
2024-11-20T22:25:00,337 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78
java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:00,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78
java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141560337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141560343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141560352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141560352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,488 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:25:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:00,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:00,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:00,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:00,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
2024-11-20T22:25:00,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/3eca7fcbabad4f79bd1c95c1e81bd208
2024-11-20T22:25:00,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2a5adb553adb456b8862980f792e8aae is 50, key is test_row_0/C:col10/1732141499567/Put/seqid=0
2024-11-20T22:25:00,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742158_1334 (size=12301)
2024-11-20T22:25:00,641 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048
2024-11-20T22:25:00,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78
2024-11-20T22:25:00,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.
2024-11-20T22:25:00,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing
2024-11-20T22:25:00,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.
2024-11-20T22:25:00,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78
java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:25:00,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:25:00,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:00,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:00,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:00,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141560851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141560854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141560871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:00,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141560873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,955 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:00,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:25:00,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:00,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:00,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:00,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:00,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:01,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2a5adb553adb456b8862980f792e8aae 2024-11-20T22:25:01,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/26b11084430d4238830cbf78116dcc25 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/26b11084430d4238830cbf78116dcc25 2024-11-20T22:25:01,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/26b11084430d4238830cbf78116dcc25, entries=200, sequenceid=341, filesize=14.4 K 2024-11-20T22:25:01,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/3eca7fcbabad4f79bd1c95c1e81bd208 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/3eca7fcbabad4f79bd1c95c1e81bd208 2024-11-20T22:25:01,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/3eca7fcbabad4f79bd1c95c1e81bd208, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T22:25:01,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2a5adb553adb456b8862980f792e8aae as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2a5adb553adb456b8862980f792e8aae 2024-11-20T22:25:01,114 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:01,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2a5adb553adb456b8862980f792e8aae, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T22:25:01,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T22:25:01,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 5f1e2878eea2034576ba469d1952fe84 in 1533ms, sequenceid=341, compaction requested=true 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:01,115 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:01,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:01,115 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:01,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:01,119 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:25:01,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:01,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:01,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:01,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,131 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:01,131 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54766 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:01,131 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:25:01,131 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:25:01,131 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:01,131 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/5a4579ddafaa48949ae31aae800d891f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a62854ebebc140f4b20445db64681bb3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/40904a6239e24ad9908c2c0e2f1fdadc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/3eca7fcbabad4f79bd1c95c1e81bd208] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=48.7 K 2024-11-20T22:25:01,132 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:01,132 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/c0da6d0173b3438d9bf64cc084dec51f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6f57bcb47f73487f8399bc3409b7d2e2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/cc4756ef8552481b8de477e61dad3dd0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/26b11084430d4238830cbf78116dcc25] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=53.5 K 2024-11-20T22:25:01,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a4579ddafaa48949ae31aae800d891f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732141496558 2024-11-20T22:25:01,136 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0da6d0173b3438d9bf64cc084dec51f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732141496558 2024-11-20T22:25:01,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/dca88a6b60ad4039b74da1a6e47b6e8f is 50, key is test_row_0/A:col10/1732141499720/Put/seqid=0 2024-11-20T22:25:01,141 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a62854ebebc140f4b20445db64681bb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732141497714 
2024-11-20T22:25:01,141 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f57bcb47f73487f8399bc3409b7d2e2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732141497714 2024-11-20T22:25:01,142 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 40904a6239e24ad9908c2c0e2f1fdadc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141498414 2024-11-20T22:25:01,142 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc4756ef8552481b8de477e61dad3dd0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141498414 2024-11-20T22:25:01,142 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eca7fcbabad4f79bd1c95c1e81bd208, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141499567 2024-11-20T22:25:01,147 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26b11084430d4238830cbf78116dcc25, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141499565 2024-11-20T22:25:01,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742159_1335 (size=12301) 2024-11-20T22:25:01,190 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:01,191 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/dca88a6b60ad4039b74da1a6e47b6e8f 2024-11-20T22:25:01,191 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e70b5bbbf4f645b4ababdd24628fbfd2 is 50, key is test_row_0/A:col10/1732141499567/Put/seqid=0 2024-11-20T22:25:01,210 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#281 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:01,211 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/d1754d65faf24e2a9854a74b3b913109 is 50, key is test_row_0/B:col10/1732141499567/Put/seqid=0 2024-11-20T22:25:01,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/fe560b9270b6455aa51e605e0ccc1c83 is 50, key is test_row_0/B:col10/1732141499720/Put/seqid=0 2024-11-20T22:25:01,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742160_1336 (size=13119) 2024-11-20T22:25:01,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742161_1337 (size=13119) 2024-11-20T22:25:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:25:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742162_1338 (size=12301) 2024-11-20T22:25:01,654 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e70b5bbbf4f645b4ababdd24628fbfd2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e70b5bbbf4f645b4ababdd24628fbfd2 2024-11-20T22:25:01,662 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into e70b5bbbf4f645b4ababdd24628fbfd2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:01,662 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:01,662 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=12, startTime=1732141501115; duration=0sec 2024-11-20T22:25:01,662 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:01,662 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:25:01,662 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:01,663 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:01,664 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:25:01,664 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:01,664 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/bb77dc10847141dc8855f97324eaa3b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/39e42f113b9242199a15b08ef6f84505, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2ed08234110a4b308f1d5e663deba249, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2a5adb553adb456b8862980f792e8aae] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=48.7 K 2024-11-20T22:25:01,665 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb77dc10847141dc8855f97324eaa3b8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732141496558 2024-11-20T22:25:01,665 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39e42f113b9242199a15b08ef6f84505, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732141497714 2024-11-20T22:25:01,666 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ed08234110a4b308f1d5e663deba249, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141498414 2024-11-20T22:25:01,666 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a5adb553adb456b8862980f792e8aae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141499567 2024-11-20T22:25:01,688 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:01,689 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/55ab26023d17415bbcf4a324e0724951 is 50, key is test_row_0/C:col10/1732141499567/Put/seqid=0 2024-11-20T22:25:01,700 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/d1754d65faf24e2a9854a74b3b913109 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/d1754d65faf24e2a9854a74b3b913109 2024-11-20T22:25:01,708 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into d1754d65faf24e2a9854a74b3b913109(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:01,708 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:01,708 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=12, startTime=1732141501115; duration=0sec 2024-11-20T22:25:01,708 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:01,708 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:25:01,723 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/fe560b9270b6455aa51e605e0ccc1c83 2024-11-20T22:25:01,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742163_1339 (size=13119) 2024-11-20T22:25:01,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d8735216ead04bcab6098e83dce8aff8 is 50, key is test_row_0/C:col10/1732141499720/Put/seqid=0 2024-11-20T22:25:01,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742164_1340 (size=12301) 2024-11-20T22:25:01,796 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d8735216ead04bcab6098e83dce8aff8 2024-11-20T22:25:01,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/dca88a6b60ad4039b74da1a6e47b6e8f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dca88a6b60ad4039b74da1a6e47b6e8f 2024-11-20T22:25:01,811 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dca88a6b60ad4039b74da1a6e47b6e8f, entries=150, sequenceid=367, filesize=12.0 K 2024-11-20T22:25:01,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 
{event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/fe560b9270b6455aa51e605e0ccc1c83 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/fe560b9270b6455aa51e605e0ccc1c83 2024-11-20T22:25:01,819 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/fe560b9270b6455aa51e605e0ccc1c83, entries=150, sequenceid=367, filesize=12.0 K 2024-11-20T22:25:01,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d8735216ead04bcab6098e83dce8aff8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d8735216ead04bcab6098e83dce8aff8 2024-11-20T22:25:01,824 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d8735216ead04bcab6098e83dce8aff8, entries=150, sequenceid=367, filesize=12.0 K 2024-11-20T22:25:01,826 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=0 B/0 for 5f1e2878eea2034576ba469d1952fe84 in 708ms, sequenceid=367, compaction requested=false 2024-11-20T22:25:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T22:25:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T22:25:01,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T22:25:01,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6430 sec 2024-11-20T22:25:01,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.6460 sec 2024-11-20T22:25:01,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:01,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:01,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:01,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:01,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:01,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:01,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/f29a9a75f36a4979b1b538eaec510450 is 50, key is test_row_0/A:col10/1732141501925/Put/seqid=0 2024-11-20T22:25:02,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742165_1341 (size=12301) 2024-11-20T22:25:02,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141562027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141562035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141562035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141562047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,133 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/55ab26023d17415bbcf4a324e0724951 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/55ab26023d17415bbcf4a324e0724951 2024-11-20T22:25:02,144 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into 55ab26023d17415bbcf4a324e0724951(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:02,144 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:02,144 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=12, startTime=1732141501115; duration=0sec 2024-11-20T22:25:02,145 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:02,145 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:25:02,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141562151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141562151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141562152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141562160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T22:25:02,300 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T22:25:02,307 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:02,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T22:25:02,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:25:02,309 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:02,309 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:02,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:02,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141562357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141562358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141562363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141562369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:25:02,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/f29a9a75f36a4979b1b538eaec510450 2024-11-20T22:25:02,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/93d3d968d29e4bcca0b46ce7c3c3a17e is 50, key is test_row_0/B:col10/1732141501925/Put/seqid=0 2024-11-20T22:25:02,461 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:25:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:02,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742166_1342 (size=12301) 2024-11-20T22:25:02,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/93d3d968d29e4bcca0b46ce7c3c3a17e 2024-11-20T22:25:02,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/58e08953d7094069b3c8e109cce8e442 is 50, key is test_row_0/C:col10/1732141501925/Put/seqid=0 2024-11-20T22:25:02,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742167_1343 (size=12301) 2024-11-20T22:25:02,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:25:02,614 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:25:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:02,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141562664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141562664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141562668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:02,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141562679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,778 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:25:02,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:02,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:02,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:25:02,931 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:02,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:25:02,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:02,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:02,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:02,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:02,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/58e08953d7094069b3c8e109cce8e442 2024-11-20T22:25:02,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/f29a9a75f36a4979b1b538eaec510450 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f29a9a75f36a4979b1b538eaec510450 2024-11-20T22:25:02,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f29a9a75f36a4979b1b538eaec510450, entries=150, sequenceid=382, filesize=12.0 K 2024-11-20T22:25:02,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/93d3d968d29e4bcca0b46ce7c3c3a17e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/93d3d968d29e4bcca0b46ce7c3c3a17e 2024-11-20T22:25:02,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/93d3d968d29e4bcca0b46ce7c3c3a17e, entries=150, sequenceid=382, filesize=12.0 K 2024-11-20T22:25:02,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/58e08953d7094069b3c8e109cce8e442 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/58e08953d7094069b3c8e109cce8e442 2024-11-20T22:25:02,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/58e08953d7094069b3c8e109cce8e442, entries=150, sequenceid=382, filesize=12.0 K 2024-11-20T22:25:03,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5f1e2878eea2034576ba469d1952fe84 in 1067ms, sequenceid=382, compaction requested=true 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:03,000 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:03,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:03,001 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:03,002 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:03,002 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:03,002 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:25:03,002 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:25:03,002 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:03,002 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/d1754d65faf24e2a9854a74b3b913109, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/fe560b9270b6455aa51e605e0ccc1c83, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/93d3d968d29e4bcca0b46ce7c3c3a17e] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.8 K 2024-11-20T22:25:03,002 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:03,002 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e70b5bbbf4f645b4ababdd24628fbfd2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dca88a6b60ad4039b74da1a6e47b6e8f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f29a9a75f36a4979b1b538eaec510450] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.8 K 2024-11-20T22:25:03,002 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d1754d65faf24e2a9854a74b3b913109, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141499567 2024-11-20T22:25:03,004 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e70b5bbbf4f645b4ababdd24628fbfd2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141499567 2024-11-20T22:25:03,004 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fe560b9270b6455aa51e605e0ccc1c83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732141499700 2024-11-20T22:25:03,004 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dca88a6b60ad4039b74da1a6e47b6e8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732141499700 2024-11-20T22:25:03,005 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 93d3d968d29e4bcca0b46ce7c3c3a17e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732141501872 2024-11-20T22:25:03,005 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f29a9a75f36a4979b1b538eaec510450, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732141501872 2024-11-20T22:25:03,019 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#288 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:03,020 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e590a9c2026540488d3fdd2ad7adbdfa is 50, key is test_row_0/B:col10/1732141501925/Put/seqid=0 2024-11-20T22:25:03,023 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#289 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:03,024 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/f12a1072809142c585abbd736c224388 is 50, key is test_row_0/A:col10/1732141501925/Put/seqid=0 2024-11-20T22:25:03,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742168_1344 (size=13221) 2024-11-20T22:25:03,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T22:25:03,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:03,091 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:03,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:03,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:03,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:03,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742169_1345 (size=13221) 2024-11-20T22:25:03,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/b4ea5e7fc94847868bd9e70481e0739a is 50, key is test_row_0/A:col10/1732141502025/Put/seqid=0 2024-11-20T22:25:03,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742170_1346 
(size=12301) 2024-11-20T22:25:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:03,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:03,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141563200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141563201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141563204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141563204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141563309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141563309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141563310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141563310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:25:03,454 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e590a9c2026540488d3fdd2ad7adbdfa as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e590a9c2026540488d3fdd2ad7adbdfa 2024-11-20T22:25:03,461 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into e590a9c2026540488d3fdd2ad7adbdfa(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:03,461 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:03,461 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141503000; duration=0sec 2024-11-20T22:25:03,462 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:03,462 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:25:03,462 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:03,464 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:03,464 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:25:03,464 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:03,464 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/55ab26023d17415bbcf4a324e0724951, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d8735216ead04bcab6098e83dce8aff8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/58e08953d7094069b3c8e109cce8e442] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.8 K 2024-11-20T22:25:03,464 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 55ab26023d17415bbcf4a324e0724951, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141499567 2024-11-20T22:25:03,465 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d8735216ead04bcab6098e83dce8aff8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732141499700 2024-11-20T22:25:03,465 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 58e08953d7094069b3c8e109cce8e442, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732141501872 2024-11-20T22:25:03,476 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5f1e2878eea2034576ba469d1952fe84#C#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:03,477 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/15f23a7b069a4816ad0d1cdb04777cff is 50, key is test_row_0/C:col10/1732141501925/Put/seqid=0 2024-11-20T22:25:03,501 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/f12a1072809142c585abbd736c224388 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f12a1072809142c585abbd736c224388 2024-11-20T22:25:03,510 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into f12a1072809142c585abbd736c224388(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:03,510 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:03,510 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141503000; duration=0sec 2024-11-20T22:25:03,510 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:03,510 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:25:03,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742171_1347 (size=13221) 2024-11-20T22:25:03,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141563518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141563521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141563522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141563522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,525 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/15f23a7b069a4816ad0d1cdb04777cff as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/15f23a7b069a4816ad0d1cdb04777cff 2024-11-20T22:25:03,533 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into 15f23a7b069a4816ad0d1cdb04777cff(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:03,533 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:03,533 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=13, startTime=1732141503000; duration=0sec 2024-11-20T22:25:03,533 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:03,533 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:25:03,583 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/b4ea5e7fc94847868bd9e70481e0739a 2024-11-20T22:25:03,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/73c857e6530841c49bdf465bc83263ad is 50, key is test_row_0/B:col10/1732141502025/Put/seqid=0 2024-11-20T22:25:03,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742172_1348 (size=12301) 2024-11-20T22:25:03,643 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/73c857e6530841c49bdf465bc83263ad 2024-11-20T22:25:03,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d1d56b6e0afd4c4d854d9a1a0c8d574d is 50, key is test_row_0/C:col10/1732141502025/Put/seqid=0 2024-11-20T22:25:03,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742173_1349 (size=12301) 2024-11-20T22:25:03,699 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d1d56b6e0afd4c4d854d9a1a0c8d574d 2024-11-20T22:25:03,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/b4ea5e7fc94847868bd9e70481e0739a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b4ea5e7fc94847868bd9e70481e0739a 2024-11-20T22:25:03,735 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b4ea5e7fc94847868bd9e70481e0739a, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T22:25:03,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/73c857e6530841c49bdf465bc83263ad as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/73c857e6530841c49bdf465bc83263ad 2024-11-20T22:25:03,754 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/73c857e6530841c49bdf465bc83263ad, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T22:25:03,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/d1d56b6e0afd4c4d854d9a1a0c8d574d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d1d56b6e0afd4c4d854d9a1a0c8d574d 2024-11-20T22:25:03,769 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d1d56b6e0afd4c4d854d9a1a0c8d574d, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T22:25:03,770 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5f1e2878eea2034576ba469d1952fe84 in 678ms, sequenceid=407, compaction requested=false 2024-11-20T22:25:03,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:03,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:03,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T22:25:03,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T22:25:03,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T22:25:03,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4620 sec 2024-11-20T22:25:03,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.4670 sec 2024-11-20T22:25:03,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:03,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:03,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:03,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:03,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:03,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:03,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/9f8e5491e2d04ffb90e2f228ceb48dc5 is 50, key is test_row_0/A:col10/1732141503826/Put/seqid=0 2024-11-20T22:25:03,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742174_1350 (size=12301) 2024-11-20T22:25:03,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/9f8e5491e2d04ffb90e2f228ceb48dc5 2024-11-20T22:25:03,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141563882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141563883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141563885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141563891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/50cfb2282f60471c9f1fcaa0b9951777 is 50, key is test_row_0/B:col10/1732141503826/Put/seqid=0 2024-11-20T22:25:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742175_1351 (size=12301) 2024-11-20T22:25:03,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141563992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141563992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:03,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141563993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141563997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141564199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141564199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141564199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141564209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/50cfb2282f60471c9f1fcaa0b9951777 2024-11-20T22:25:04,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2bdd3e249ccc480ea1e302c68f456313 is 50, key is test_row_0/C:col10/1732141503826/Put/seqid=0 2024-11-20T22:25:04,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T22:25:04,416 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T22:25:04,417 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:04,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T22:25:04,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:25:04,422 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:04,423 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:04,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:04,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742176_1352 (size=12301) 2024-11-20T22:25:04,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2bdd3e249ccc480ea1e302c68f456313 2024-11-20T22:25:04,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/9f8e5491e2d04ffb90e2f228ceb48dc5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/9f8e5491e2d04ffb90e2f228ceb48dc5 2024-11-20T22:25:04,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/9f8e5491e2d04ffb90e2f228ceb48dc5, entries=150, sequenceid=423, filesize=12.0 K 2024-11-20T22:25:04,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/50cfb2282f60471c9f1fcaa0b9951777 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/50cfb2282f60471c9f1fcaa0b9951777 2024-11-20T22:25:04,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/50cfb2282f60471c9f1fcaa0b9951777, entries=150, sequenceid=423, filesize=12.0 K 2024-11-20T22:25:04,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/2bdd3e249ccc480ea1e302c68f456313 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2bdd3e249ccc480ea1e302c68f456313 2024-11-20T22:25:04,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2bdd3e249ccc480ea1e302c68f456313, entries=150, sequenceid=423, filesize=12.0 K 2024-11-20T22:25:04,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5f1e2878eea2034576ba469d1952fe84 in 640ms, sequenceid=423, compaction requested=true 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status 
journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:04,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:04,471 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:04,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:04,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:04,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:25:04,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
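
[editor's note] The repeated RegionTooBusyException entries above are HBase's write backpressure: HRegion.checkResources() starts rejecting mutations once the region's memstore grows past its blocking limit, and the "Over memstore limit=512.0 K" figure is that blocking size, derived from the per-region flush threshold times the block multiplier. The sketch below only illustrates the two standard settings involved; the concrete values are assumptions for this note and are not read from the test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch of the two settings behind the "Over memstore limit" backpressure.
// The values are illustrative assumptions, not the test's actual configuration.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    long flushSize = 128L * 1024;   // hypothetical per-region flush threshold, in bytes
    int blockMultiplier = 4;        // stock HBase default for the block multiplier

    conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
    conf.setInt("hbase.hregion.memstore.block.multiplier", blockMultiplier);

    // HRegion.checkResources() rejects writes with RegionTooBusyException once a region's
    // memstore exceeds flushSize * blockMultiplier (512 KB with these numbers), which is
    // consistent with the "Over memstore limit=512.0 K" messages in the log above.
    System.out.println("blocking limit = " + flushSize * blockMultiplier + " bytes");
  }
}
```
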
2024-11-20T22:25:04,473 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e590a9c2026540488d3fdd2ad7adbdfa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/73c857e6530841c49bdf465bc83263ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/50cfb2282f60471c9f1fcaa0b9951777] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.9 K 2024-11-20T22:25:04,473 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:04,473 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:25:04,473 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,473 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f12a1072809142c585abbd736c224388, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b4ea5e7fc94847868bd9e70481e0739a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/9f8e5491e2d04ffb90e2f228ceb48dc5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.9 K 2024-11-20T22:25:04,475 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e590a9c2026540488d3fdd2ad7adbdfa, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732141501872 2024-11-20T22:25:04,476 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f12a1072809142c585abbd736c224388, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732141501872 2024-11-20T22:25:04,476 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 73c857e6530841c49bdf465bc83263ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732141502025 2024-11-20T22:25:04,476 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4ea5e7fc94847868bd9e70481e0739a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732141502025 2024-11-20T22:25:04,477 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 50cfb2282f60471c9f1fcaa0b9951777, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732141503200 2024-11-20T22:25:04,477 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f8e5491e2d04ffb90e2f228ceb48dc5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732141503200 2024-11-20T22:25:04,485 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:04,485 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/8749a644dbf647e7901597fc0df55eea is 50, key is test_row_0/A:col10/1732141503826/Put/seqid=0 2024-11-20T22:25:04,492 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:04,493 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/eab1c0d5ad3a40fab1c258980d874f48 is 50, key is test_row_0/B:col10/1732141503826/Put/seqid=0 2024-11-20T22:25:04,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:04,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:04,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:04,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:04,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:04,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:04,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:04,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:04,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:25:04,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742178_1354 (size=13323) 2024-11-20T22:25:04,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39471 is added to blk_1073742177_1353 (size=13323) 2024-11-20T22:25:04,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e49bd412dab34293a6e3aca58b7a2bda is 50, key is test_row_0/A:col10/1732141504511/Put/seqid=0 2024-11-20T22:25:04,540 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/eab1c0d5ad3a40fab1c258980d874f48 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/eab1c0d5ad3a40fab1c258980d874f48 2024-11-20T22:25:04,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141564536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141564538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141564538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,546 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into eab1c0d5ad3a40fab1c258980d874f48(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
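
[editor's note] The compaction entries around this point show ExploringCompactionPolicy selecting all three eligible store files per column family (totalSize=36.9 K) and the B-store compaction completing into a single ~13.0 K file. These compactions are system-requested by MemStoreFlusher; for reference, a compaction can also be queued explicitly through the Admin API. The snippet below is a hypothetical sketch assuming a reachable cluster and the TestAcidGuarantees table; it is not part of the test code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical client-side compaction request; assumes a reachable cluster and the
// TestAcidGuarantees table. Not taken from the test code.
public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);       // queue a minor compaction for the table's regions/stores
      admin.majorCompact(table);  // or request that all store files be rewritten in one pass
    }
  }
}
```
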
2024-11-20T22:25:04,546 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:04,547 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141504471; duration=0sec 2024-11-20T22:25:04,547 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:04,547 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:25:04,547 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:04,548 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:04,548 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:25:04,548 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,548 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/15f23a7b069a4816ad0d1cdb04777cff, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d1d56b6e0afd4c4d854d9a1a0c8d574d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2bdd3e249ccc480ea1e302c68f456313] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=36.9 K 2024-11-20T22:25:04,555 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 15f23a7b069a4816ad0d1cdb04777cff, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732141501872 2024-11-20T22:25:04,558 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d56b6e0afd4c4d854d9a1a0c8d574d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732141502025 2024-11-20T22:25:04,558 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bdd3e249ccc480ea1e302c68f456313, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732141503200 2024-11-20T22:25:04,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 
is added to blk_1073742179_1355 (size=14741) 2024-11-20T22:25:04,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141564550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e49bd412dab34293a6e3aca58b7a2bda 2024-11-20T22:25:04,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e2758bcfee3f4189a7dec6ab5148b8a7 is 50, key is test_row_0/B:col10/1732141504511/Put/seqid=0 2024-11-20T22:25:04,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,575 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:04,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:04,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:04,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,576 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/28be9ee5200a477b9529278d8a7056c1 is 50, key is test_row_0/C:col10/1732141503826/Put/seqid=0 2024-11-20T22:25:04,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
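
[editor's note] The pid=82 failure above is the master-driven flush path: FlushTableProcedure (pid=81) dispatched a FlushRegionProcedure to the region server, FlushRegionCallable found the region already flushing and reported "Unable to complete flush" back to the master, which then re-dispatches the subprocedure (visible again further down). The client-side trigger for this sequence is an admin flush request, matching the earlier "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry. A minimal, hypothetical example of issuing such a request is sketched below, assuming default connection settings; it is not the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical admin-side flush request, i.e. the kind of call that produces the
// "flush TestAcidGuarantees" / FlushTableProcedure entries above. Assumes default
// connection settings; not taken from the test code.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master and waits for it to complete,
      // as reflected by the "Operation: FLUSH ... procId: ... completed" log entry.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```
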
2024-11-20T22:25:04,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742180_1356 (size=12301) 2024-11-20T22:25:04,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e2758bcfee3f4189a7dec6ab5148b8a7 2024-11-20T22:25:04,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/fb38ed12412946c29c96dc0ff276a7e5 is 50, key is test_row_0/C:col10/1732141504511/Put/seqid=0 2024-11-20T22:25:04,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141564645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141564646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141564646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742181_1357 (size=13323) 2024-11-20T22:25:04,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141564663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742182_1358 (size=12301) 2024-11-20T22:25:04,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:25:04,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:04,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:04,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:04,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141564855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141564855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141564856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:04,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141564871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:04,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:04,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:04,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:04,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:04,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:04,933 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/8749a644dbf647e7901597fc0df55eea as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8749a644dbf647e7901597fc0df55eea 2024-11-20T22:25:04,947 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into 8749a644dbf647e7901597fc0df55eea(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:04,947 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:04,947 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141504471; duration=0sec 2024-11-20T22:25:04,947 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:04,947 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:25:05,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:25:05,053 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:05,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:05,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,064 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/28be9ee5200a477b9529278d8a7056c1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/28be9ee5200a477b9529278d8a7056c1 2024-11-20T22:25:05,071 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into 28be9ee5200a477b9529278d8a7056c1(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:05,071 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:05,071 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=13, startTime=1732141504471; duration=0sec 2024-11-20T22:25:05,071 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:05,071 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:25:05,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/fb38ed12412946c29c96dc0ff276a7e5 2024-11-20T22:25:05,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/e49bd412dab34293a6e3aca58b7a2bda as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e49bd412dab34293a6e3aca58b7a2bda 2024-11-20T22:25:05,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e49bd412dab34293a6e3aca58b7a2bda, entries=200, sequenceid=448, filesize=14.4 K 2024-11-20T22:25:05,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e2758bcfee3f4189a7dec6ab5148b8a7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e2758bcfee3f4189a7dec6ab5148b8a7 2024-11-20T22:25:05,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e2758bcfee3f4189a7dec6ab5148b8a7, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T22:25:05,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/fb38ed12412946c29c96dc0ff276a7e5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fb38ed12412946c29c96dc0ff276a7e5 2024-11-20T22:25:05,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141565160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141565160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fb38ed12412946c29c96dc0ff276a7e5, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T22:25:05,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 5f1e2878eea2034576ba469d1952fe84 in 667ms, sequenceid=448, compaction requested=false 2024-11-20T22:25:05,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:05,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141565160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:25:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:05,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:05,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:05,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:05,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:05,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/14342275661049e4b7f06b1cd1c246c3 is 50, key is test_row_0/A:col10/1732141504537/Put/seqid=0 2024-11-20T22:25:05,206 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
as already flushing 2024-11-20T22:25:05,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,209 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742183_1359 (size=14741) 2024-11-20T22:25:05,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141565340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,363 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:05,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:05,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141565445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,521 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:05,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:05,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:25:05,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/14342275661049e4b7f06b1cd1c246c3 2024-11-20T22:25:05,660 DEBUG [Thread-1215 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d32f3f9 to 127.0.0.1:51916 2024-11-20T22:25:05,660 DEBUG [Thread-1215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:05,662 DEBUG [Thread-1213 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73db2730 to 127.0.0.1:51916 2024-11-20T22:25:05,662 DEBUG [Thread-1213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:05,665 DEBUG [Thread-1217 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x16092bcc to 127.0.0.1:51916 2024-11-20T22:25:05,665 DEBUG [Thread-1217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:05,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141565656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,666 DEBUG [Thread-1209 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2cdd73 to 127.0.0.1:51916 2024-11-20T22:25:05,666 DEBUG [Thread-1209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:05,667 DEBUG [Thread-1211 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35ef38cd to 127.0.0.1:51916 2024-11-20T22:25:05,667 DEBUG [Thread-1211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:05,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e48222859eb245b5af55dc38fb23852e is 50, key is test_row_0/B:col10/1732141504537/Put/seqid=0 2024-11-20T22:25:05,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,673 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49200 deadline: 1732141565673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49224 deadline: 1732141565673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:05,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:05,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49220 deadline: 1732141565683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742184_1360 (size=12301) 2024-11-20T22:25:05,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e48222859eb245b5af55dc38fb23852e 2024-11-20T22:25:05,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b is 50, key is test_row_0/C:col10/1732141504537/Put/seqid=0 2024-11-20T22:25:05,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742185_1361 (size=12301) 2024-11-20T22:25:05,826 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:05,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:05,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:05,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49248 deadline: 1732141565975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,980 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:05,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:05,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:05,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:05,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:05,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:05,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:06,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:06,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:06,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. as already flushing 2024-11-20T22:25:06,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:06,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:06,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b 2024-11-20T22:25:06,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/14342275661049e4b7f06b1cd1c246c3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/14342275661049e4b7f06b1cd1c246c3 2024-11-20T22:25:06,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/14342275661049e4b7f06b1cd1c246c3, entries=200, sequenceid=465, filesize=14.4 K 2024-11-20T22:25:06,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e48222859eb245b5af55dc38fb23852e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e48222859eb245b5af55dc38fb23852e 2024-11-20T22:25:06,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e48222859eb245b5af55dc38fb23852e, entries=150, sequenceid=465, filesize=12.0 K 2024-11-20T22:25:06,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b 2024-11-20T22:25:06,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b, entries=150, sequenceid=465, filesize=12.0 K 2024-11-20T22:25:06,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5f1e2878eea2034576ba469d1952fe84 in 1065ms, sequenceid=465, compaction requested=true 2024-11-20T22:25:06,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:06,252 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-20T22:25:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f1e2878eea2034576ba469d1952fe84:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:06,254 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:06,254 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42805 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:06,254 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/A is initiating minor compaction (all files) 2024-11-20T22:25:06,254 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/A in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:06,254 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8749a644dbf647e7901597fc0df55eea, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e49bd412dab34293a6e3aca58b7a2bda, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/14342275661049e4b7f06b1cd1c246c3] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=41.8 K 2024-11-20T22:25:06,255 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8749a644dbf647e7901597fc0df55eea, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732141503200 2024-11-20T22:25:06,256 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e49bd412dab34293a6e3aca58b7a2bda, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732141503877 2024-11-20T22:25:06,257 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:06,257 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/B is initiating minor compaction (all files) 2024-11-20T22:25:06,257 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/B in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:06,257 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/eab1c0d5ad3a40fab1c258980d874f48, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e2758bcfee3f4189a7dec6ab5148b8a7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e48222859eb245b5af55dc38fb23852e] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=37.0 K 2024-11-20T22:25:06,262 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting eab1c0d5ad3a40fab1c258980d874f48, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732141503200 2024-11-20T22:25:06,262 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14342275661049e4b7f06b1cd1c246c3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1732141504526 2024-11-20T22:25:06,263 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e2758bcfee3f4189a7dec6ab5148b8a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732141503877 2024-11-20T22:25:06,263 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e48222859eb245b5af55dc38fb23852e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1732141504526 2024-11-20T22:25:06,274 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#B#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:06,275 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#A#compaction#307 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:06,275 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/cc98f537fb0341b48639b424444c8d96 is 50, key is test_row_0/B:col10/1732141504537/Put/seqid=0 2024-11-20T22:25:06,275 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/aee6db6e226d4798815998adc5efbe0c is 50, key is test_row_0/A:col10/1732141504537/Put/seqid=0 2024-11-20T22:25:06,298 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:06,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T22:25:06,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:06,298 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:25:06,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:06,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:06,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:06,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:06,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:06,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742187_1363 (size=13425) 2024-11-20T22:25:06,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/ba17e182bd3c4b9aaa02c7709e8acb97 is 50, key is 
test_row_0/A:col10/1732141505335/Put/seqid=0 2024-11-20T22:25:06,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742186_1362 (size=13425) 2024-11-20T22:25:06,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742188_1364 (size=12301) 2024-11-20T22:25:06,353 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/ba17e182bd3c4b9aaa02c7709e8acb97 2024-11-20T22:25:06,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e902dc0220a04123a10f098b2e460d1d is 50, key is test_row_0/B:col10/1732141505335/Put/seqid=0 2024-11-20T22:25:06,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742189_1365 (size=12301) 2024-11-20T22:25:06,366 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e902dc0220a04123a10f098b2e460d1d 2024-11-20T22:25:06,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/4b23adfaaf764664b29a20c74efb43a5 is 50, key is test_row_0/C:col10/1732141505335/Put/seqid=0 2024-11-20T22:25:06,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742190_1366 (size=12301) 2024-11-20T22:25:06,392 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/4b23adfaaf764664b29a20c74efb43a5 2024-11-20T22:25:06,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/ba17e182bd3c4b9aaa02c7709e8acb97 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ba17e182bd3c4b9aaa02c7709e8acb97 2024-11-20T22:25:06,402 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ba17e182bd3c4b9aaa02c7709e8acb97, entries=150, sequenceid=487, filesize=12.0 K 2024-11-20T22:25:06,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/e902dc0220a04123a10f098b2e460d1d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e902dc0220a04123a10f098b2e460d1d 2024-11-20T22:25:06,412 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e902dc0220a04123a10f098b2e460d1d, entries=150, sequenceid=487, filesize=12.0 K 2024-11-20T22:25:06,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/4b23adfaaf764664b29a20c74efb43a5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4b23adfaaf764664b29a20c74efb43a5 2024-11-20T22:25:06,418 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4b23adfaaf764664b29a20c74efb43a5, entries=150, sequenceid=487, filesize=12.0 K 2024-11-20T22:25:06,418 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 5f1e2878eea2034576ba469d1952fe84 in 120ms, sequenceid=487, compaction requested=true 2024-11-20T22:25:06,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:06,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
2024-11-20T22:25:06,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T22:25:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T22:25:06,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T22:25:06,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0000 sec 2024-11-20T22:25:06,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.0080 sec 2024-11-20T22:25:06,492 DEBUG [Thread-1206 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32416934 to 127.0.0.1:51916 2024-11-20T22:25:06,492 DEBUG [Thread-1206 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:06,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T22:25:06,528 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T22:25:06,678 DEBUG [Thread-1204 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d679835 to 127.0.0.1:51916 2024-11-20T22:25:06,678 DEBUG [Thread-1204 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:06,680 DEBUG [Thread-1202 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x165a32db to 127.0.0.1:51916 2024-11-20T22:25:06,680 DEBUG [Thread-1202 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:06,686 DEBUG [Thread-1200 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cdbac5f to 127.0.0.1:51916 2024-11-20T22:25:06,687 DEBUG [Thread-1200 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:06,733 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/aee6db6e226d4798815998adc5efbe0c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/aee6db6e226d4798815998adc5efbe0c 2024-11-20T22:25:06,743 DEBUG [Thread-1198 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x033c112c to 127.0.0.1:51916 2024-11-20T22:25:06,743 DEBUG [Thread-1198 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:06,743 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 113 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 98 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3282 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3221 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3170 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3272 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3166 2024-11-20T22:25:06,744 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:25:06,744 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:25:06,744 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d72231b to 127.0.0.1:51916 2024-11-20T22:25:06,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:06,746 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:25:06,748 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:25:06,749 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/cc98f537fb0341b48639b424444c8d96 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc98f537fb0341b48639b424444c8d96 2024-11-20T22:25:06,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:06,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:25:06,762 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141506762"}]},"ts":"1732141506762"} 2024-11-20T22:25:06,767 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:25:06,768 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/B of 5f1e2878eea2034576ba469d1952fe84 into cc98f537fb0341b48639b424444c8d96(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:06,768 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:06,768 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/B, priority=13, startTime=1732141506252; duration=0sec 2024-11-20T22:25:06,768 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:06,768 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:B 2024-11-20T22:25:06,768 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:06,771 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:06,771 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 5f1e2878eea2034576ba469d1952fe84/C is initiating minor compaction (all files) 2024-11-20T22:25:06,771 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f1e2878eea2034576ba469d1952fe84/C in TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:06,771 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/28be9ee5200a477b9529278d8a7056c1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fb38ed12412946c29c96dc0ff276a7e5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4b23adfaaf764664b29a20c74efb43a5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp, totalSize=49.0 K 2024-11-20T22:25:06,771 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/A of 5f1e2878eea2034576ba469d1952fe84 into aee6db6e226d4798815998adc5efbe0c(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
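The compaction-selection entry above reports 4 store files totalling 50226 bytes for 5f1e2878eea2034576ba469d1952fe84/C, which is consistent with the totalSize=49.0 K printed in the same message (50226 / 1024 is roughly 49.0). A companion sketch for pulling the "Completed compaction" summaries out of a dump like this one follows; the regular expression and field names are illustrative and assume the exact wording used here.

import re

# Extract "Completed compaction ..." summaries from a region server log dump
# like the one above; the pattern assumes the message wording shown in it.
COMPACTION_DONE = re.compile(
    r"Completed compaction of (\d+) \(all\) file\(s\) in (\S+) of \S+ "
    r"into (\S+)\(size=([\d.]+ [KMGT])\), total size for store is ([\d.]+ [KMGT])"
)

def compaction_summaries(log_text):
    for m in COMPACTION_DONE.finditer(log_text):
        yield {"input_files": int(m.group(1)), "store": m.group(2),
               "output_file": m.group(3), "output_size": m.group(4),
               "store_size_after": m.group(5)}

In this section it would yield three events: stores B and A of 5f1e2878eea2034576ba469d1952fe84 each compact 3 files into a 13.1 K file (store size 25.1 K afterwards), and store C later compacts 4 files into a 13.1 K file (store size 13.1 K).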
2024-11-20T22:25:06,771 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:06,771 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/A, priority=13, startTime=1732141506251; duration=0sec 2024-11-20T22:25:06,771 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:06,772 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:A 2024-11-20T22:25:06,773 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 28be9ee5200a477b9529278d8a7056c1, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732141503200 2024-11-20T22:25:06,773 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fb38ed12412946c29c96dc0ff276a7e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732141503877 2024-11-20T22:25:06,774 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c4bd7f806ed4ceb9d1785bb3bd91c6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1732141504526 2024-11-20T22:25:06,774 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b23adfaaf764664b29a20c74efb43a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732141505318 2024-11-20T22:25:06,797 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f1e2878eea2034576ba469d1952fe84#C#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:06,797 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/a01b991ab0a94b9584cb0e9234136ac4 is 50, key is test_row_0/C:col10/1732141505335/Put/seqid=0 2024-11-20T22:25:06,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742191_1367 (size=13459) 2024-11-20T22:25:06,823 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/a01b991ab0a94b9584cb0e9234136ac4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/a01b991ab0a94b9584cb0e9234136ac4 2024-11-20T22:25:06,828 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5f1e2878eea2034576ba469d1952fe84/C of 5f1e2878eea2034576ba469d1952fe84 into a01b991ab0a94b9584cb0e9234136ac4(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:06,828 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:06,828 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84., storeName=5f1e2878eea2034576ba469d1952fe84/C, priority=12, startTime=1732141506252; duration=0sec 2024-11-20T22:25:06,828 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:06,828 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f1e2878eea2034576ba469d1952fe84:C 2024-11-20T22:25:06,835 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:25:06,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:25:06,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, UNASSIGN}] 2024-11-20T22:25:06,838 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, UNASSIGN 2024-11-20T22:25:06,838 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=85 updating hbase:meta row=5f1e2878eea2034576ba469d1952fe84, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:06,847 DEBUG [PEWorker-5 {}] 
assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:25:06,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; CloseRegionProcedure 5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:25:06,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:25:07,001 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:07,002 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(124): Close 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:07,002 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:25:07,002 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1681): Closing 5f1e2878eea2034576ba469d1952fe84, disabling compactions & flushes 2024-11-20T22:25:07,002 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:07,002 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 2024-11-20T22:25:07,002 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. after waiting 0 ms 2024-11-20T22:25:07,002 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
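Closing the region above disables further compactions and flushes and then, in the entries that follow, drives one last flush of all three column families. The sketch below pulls the region-level flush summaries out of such a dump; it assumes the exact "Finished flush of dataSize ..." wording, and the dictionary keys are illustrative rather than HBase terminology.

import re

# Extract region-level flush summaries ("Finished flush of dataSize ...") from
# a log dump like this one; byte counts are taken from the value after each "/".
FLUSH_DONE = re.compile(
    r"Finished flush of dataSize ~[^/]+/(\d+), heapSize ~[^/]+/(\d+), "
    r"currentSize=[^/]+/(\d+) for (\S+) in (\d+)ms, sequenceid=(\d+)"
)

def flush_summaries(log_text):
    for m in FLUSH_DONE.finditer(log_text):
        yield {"region": m.group(4), "data_bytes": int(m.group(1)),
               "heap_bytes": int(m.group(2)), "remaining_bytes": int(m.group(3)),
               "duration_ms": int(m.group(5)), "sequence_id": int(m.group(6))}

For this close, the three per-store flushes of ~11.18 KB each (stores A, B and C, all at sequenceid=498) roll up into the single region summary of ~33.54 KB (34350 bytes) flushed in 883 ms.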
2024-11-20T22:25:07,002 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(2837): Flushing 5f1e2878eea2034576ba469d1952fe84 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:25:07,003 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=A 2024-11-20T22:25:07,003 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:07,003 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=B 2024-11-20T22:25:07,003 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:07,003 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f1e2878eea2034576ba469d1952fe84, store=C 2024-11-20T22:25:07,003 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:07,006 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/abaaf02b8b204276a4c58899f78740b1 is 50, key is test_row_0/A:col10/1732141506678/Put/seqid=0 2024-11-20T22:25:07,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742192_1368 (size=12301) 2024-11-20T22:25:07,012 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/abaaf02b8b204276a4c58899f78740b1 2024-11-20T22:25:07,018 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0ede91e4fcfe4ad1a403a0d0afdc0171 is 50, key is test_row_0/B:col10/1732141506678/Put/seqid=0 2024-11-20T22:25:07,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742193_1369 (size=12301) 2024-11-20T22:25:07,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:25:07,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:25:07,430 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0ede91e4fcfe4ad1a403a0d0afdc0171 2024-11-20T22:25:07,444 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e81d0837b1b34e12864aed34d5c9248f is 50, key is test_row_0/C:col10/1732141506678/Put/seqid=0 2024-11-20T22:25:07,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742194_1370 (size=12301) 2024-11-20T22:25:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:25:07,864 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e81d0837b1b34e12864aed34d5c9248f 2024-11-20T22:25:07,869 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/A/abaaf02b8b204276a4c58899f78740b1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/abaaf02b8b204276a4c58899f78740b1 2024-11-20T22:25:07,873 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/abaaf02b8b204276a4c58899f78740b1, entries=150, sequenceid=498, filesize=12.0 K 2024-11-20T22:25:07,874 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/B/0ede91e4fcfe4ad1a403a0d0afdc0171 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0ede91e4fcfe4ad1a403a0d0afdc0171 2024-11-20T22:25:07,878 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0ede91e4fcfe4ad1a403a0d0afdc0171, entries=150, sequenceid=498, filesize=12.0 K 2024-11-20T22:25:07,879 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/.tmp/C/e81d0837b1b34e12864aed34d5c9248f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e81d0837b1b34e12864aed34d5c9248f 2024-11-20T22:25:07,883 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e81d0837b1b34e12864aed34d5c9248f, entries=150, sequenceid=498, filesize=12.0 K 2024-11-20T22:25:07,885 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 5f1e2878eea2034576ba469d1952fe84 in 883ms, sequenceid=498, compaction requested=true 2024-11-20T22:25:07,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/48a449b6c7be41a998a64aa3df424dcc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d94b94443a67423fbe7be6968534af37, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e287cab04dee4b87813babc4946884b6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/4855ec64aa3a4e849357152f4714fa42, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/0be0913081834c738459b993ef7a7b36, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/21677a04495548e0af26dd598cb26a2a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/858420f45528492d889fda6b7b33d172, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ac4601de858b4e068ea5e7d7c3a366c3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/471fd453e75b44869d965d1f5cb40d5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6a0d4d205e49499c83be324d244f32dd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/db0a6429d9da43639dace47c16a763dd, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/35a0d6823e4c4ce6812e93e2cb766afc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/45c54b122cb2444bb213570e04eaaf06, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/82c8a64ef119446ba4eafe1946bc7ea8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/7dfca359c7334320b8e42dcc67fa0caa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/1dd72a35427b4dcea4f9e3d9b9742e0a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b172edc6c8ac4f2c9724f1ebf2cb52c5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8dc649759cf64188b1adf47127b84fb4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/c0da6d0173b3438d9bf64cc084dec51f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d026d8ae45904d0e87079235f5da2505, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6f57bcb47f73487f8399bc3409b7d2e2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/cc4756ef8552481b8de477e61dad3dd0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/26b11084430d4238830cbf78116dcc25, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e70b5bbbf4f645b4ababdd24628fbfd2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dca88a6b60ad4039b74da1a6e47b6e8f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f12a1072809142c585abbd736c224388, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f29a9a75f36a4979b1b538eaec510450, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b4ea5e7fc94847868bd9e70481e0739a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8749a644dbf647e7901597fc0df55eea, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/9f8e5491e2d04ffb90e2f228ceb48dc5, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e49bd412dab34293a6e3aca58b7a2bda, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/14342275661049e4b7f06b1cd1c246c3] to archive 2024-11-20T22:25:07,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:07,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/48a449b6c7be41a998a64aa3df424dcc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/48a449b6c7be41a998a64aa3df424dcc 2024-11-20T22:25:07,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d94b94443a67423fbe7be6968534af37 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d94b94443a67423fbe7be6968534af37 2024-11-20T22:25:07,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e287cab04dee4b87813babc4946884b6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e287cab04dee4b87813babc4946884b6 2024-11-20T22:25:07,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/4855ec64aa3a4e849357152f4714fa42 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/4855ec64aa3a4e849357152f4714fa42 2024-11-20T22:25:07,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/0be0913081834c738459b993ef7a7b36 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/0be0913081834c738459b993ef7a7b36 2024-11-20T22:25:07,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/21677a04495548e0af26dd598cb26a2a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/21677a04495548e0af26dd598cb26a2a 2024-11-20T22:25:07,910 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/858420f45528492d889fda6b7b33d172 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/858420f45528492d889fda6b7b33d172 2024-11-20T22:25:07,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dfaf9f3cd98b4bfa90ce83ccc2cf9d81 2024-11-20T22:25:07,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ac4601de858b4e068ea5e7d7c3a366c3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ac4601de858b4e068ea5e7d7c3a366c3 2024-11-20T22:25:07,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/471fd453e75b44869d965d1f5cb40d5a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/471fd453e75b44869d965d1f5cb40d5a 2024-11-20T22:25:07,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6a0d4d205e49499c83be324d244f32dd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6a0d4d205e49499c83be324d244f32dd 2024-11-20T22:25:07,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/db0a6429d9da43639dace47c16a763dd to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/db0a6429d9da43639dace47c16a763dd 2024-11-20T22:25:07,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/35a0d6823e4c4ce6812e93e2cb766afc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/35a0d6823e4c4ce6812e93e2cb766afc 2024-11-20T22:25:07,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/45c54b122cb2444bb213570e04eaaf06 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/45c54b122cb2444bb213570e04eaaf06 2024-11-20T22:25:07,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/82c8a64ef119446ba4eafe1946bc7ea8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/82c8a64ef119446ba4eafe1946bc7ea8 2024-11-20T22:25:07,923 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/7dfca359c7334320b8e42dcc67fa0caa to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/7dfca359c7334320b8e42dcc67fa0caa 2024-11-20T22:25:07,925 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/1dd72a35427b4dcea4f9e3d9b9742e0a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/1dd72a35427b4dcea4f9e3d9b9742e0a 2024-11-20T22:25:07,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b172edc6c8ac4f2c9724f1ebf2cb52c5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b172edc6c8ac4f2c9724f1ebf2cb52c5 2024-11-20T22:25:07,928 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8dc649759cf64188b1adf47127b84fb4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8dc649759cf64188b1adf47127b84fb4 2024-11-20T22:25:07,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/c0da6d0173b3438d9bf64cc084dec51f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/c0da6d0173b3438d9bf64cc084dec51f 2024-11-20T22:25:07,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d026d8ae45904d0e87079235f5da2505 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/d026d8ae45904d0e87079235f5da2505 2024-11-20T22:25:07,934 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6f57bcb47f73487f8399bc3409b7d2e2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/6f57bcb47f73487f8399bc3409b7d2e2 2024-11-20T22:25:07,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/cc4756ef8552481b8de477e61dad3dd0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/cc4756ef8552481b8de477e61dad3dd0 2024-11-20T22:25:07,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/26b11084430d4238830cbf78116dcc25 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/26b11084430d4238830cbf78116dcc25 2024-11-20T22:25:07,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e70b5bbbf4f645b4ababdd24628fbfd2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e70b5bbbf4f645b4ababdd24628fbfd2 2024-11-20T22:25:07,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dca88a6b60ad4039b74da1a6e47b6e8f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/dca88a6b60ad4039b74da1a6e47b6e8f 2024-11-20T22:25:07,943 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f12a1072809142c585abbd736c224388 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f12a1072809142c585abbd736c224388 2024-11-20T22:25:07,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f29a9a75f36a4979b1b538eaec510450 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/f29a9a75f36a4979b1b538eaec510450 2024-11-20T22:25:07,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b4ea5e7fc94847868bd9e70481e0739a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/b4ea5e7fc94847868bd9e70481e0739a 2024-11-20T22:25:07,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8749a644dbf647e7901597fc0df55eea to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/8749a644dbf647e7901597fc0df55eea 2024-11-20T22:25:07,949 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/9f8e5491e2d04ffb90e2f228ceb48dc5 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/9f8e5491e2d04ffb90e2f228ceb48dc5 2024-11-20T22:25:07,951 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e49bd412dab34293a6e3aca58b7a2bda to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/e49bd412dab34293a6e3aca58b7a2bda 2024-11-20T22:25:07,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/14342275661049e4b7f06b1cd1c246c3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/14342275661049e4b7f06b1cd1c246c3 2024-11-20T22:25:07,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc5d4600020c4888937d940486c109df, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d42d550a99d43989b4a8e06c41a1b2f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/10684efff1954ed196dd431054c5cf1f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ab23ef6427874597b9f4af8121faef30, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/32c854cb05e448eb99f5a785244355a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/507037353c5f4aa485c871ee27776012, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/c425134f582d4fedb124c3a14debab63, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/268065b24ea645ab8f7ff2d2014a0dc7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/7cd120569a5d4b5b9c8f2dc43fba8a06, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a4a4e8a3e13f4a51979f1319ee499774, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/68bdb1127e4a43af9c04887908bbf0f3, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/df2ebd9e25fd43f5b3577eece3f0237f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/369c659bb53f4ebab3ad2d28af9b038c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/f3bd1361b49944c999d23089ba926bbc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/4a092db6241842058f9a0a07579a29e3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/9749af848e66429ebfc6a99cb0ed32d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/bcf50c3453704785b0e18985f60ea35d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/646d2fe5c49b411d809e70091247259a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d588eed3189440999d01ee2a942dd60, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/5a4579ddafaa48949ae31aae800d891f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ff6dfda31dc342a39fd2ce1b37b75010, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a62854ebebc140f4b20445db64681bb3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/40904a6239e24ad9908c2c0e2f1fdadc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/d1754d65faf24e2a9854a74b3b913109, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/3eca7fcbabad4f79bd1c95c1e81bd208, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/fe560b9270b6455aa51e605e0ccc1c83, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e590a9c2026540488d3fdd2ad7adbdfa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/93d3d968d29e4bcca0b46ce7c3c3a17e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/73c857e6530841c49bdf465bc83263ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/eab1c0d5ad3a40fab1c258980d874f48, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/50cfb2282f60471c9f1fcaa0b9951777, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e2758bcfee3f4189a7dec6ab5148b8a7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e48222859eb245b5af55dc38fb23852e] to archive 2024-11-20T22:25:07,955 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:07,957 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc5d4600020c4888937d940486c109df to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc5d4600020c4888937d940486c109df 2024-11-20T22:25:07,958 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d42d550a99d43989b4a8e06c41a1b2f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d42d550a99d43989b4a8e06c41a1b2f 2024-11-20T22:25:07,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/10684efff1954ed196dd431054c5cf1f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/10684efff1954ed196dd431054c5cf1f 2024-11-20T22:25:07,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ab23ef6427874597b9f4af8121faef30 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ab23ef6427874597b9f4af8121faef30 2024-11-20T22:25:07,962 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/32c854cb05e448eb99f5a785244355a0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/32c854cb05e448eb99f5a785244355a0 2024-11-20T22:25:07,964 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/507037353c5f4aa485c871ee27776012 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/507037353c5f4aa485c871ee27776012 2024-11-20T22:25:07,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/c425134f582d4fedb124c3a14debab63 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/c425134f582d4fedb124c3a14debab63 2024-11-20T22:25:07,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/268065b24ea645ab8f7ff2d2014a0dc7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/268065b24ea645ab8f7ff2d2014a0dc7 2024-11-20T22:25:07,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/7cd120569a5d4b5b9c8f2dc43fba8a06 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/7cd120569a5d4b5b9c8f2dc43fba8a06 2024-11-20T22:25:07,974 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a4a4e8a3e13f4a51979f1319ee499774 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a4a4e8a3e13f4a51979f1319ee499774 2024-11-20T22:25:07,976 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/68bdb1127e4a43af9c04887908bbf0f3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/68bdb1127e4a43af9c04887908bbf0f3 2024-11-20T22:25:07,977 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/df2ebd9e25fd43f5b3577eece3f0237f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/df2ebd9e25fd43f5b3577eece3f0237f 2024-11-20T22:25:07,979 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/369c659bb53f4ebab3ad2d28af9b038c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/369c659bb53f4ebab3ad2d28af9b038c 2024-11-20T22:25:07,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/f3bd1361b49944c999d23089ba926bbc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/f3bd1361b49944c999d23089ba926bbc 2024-11-20T22:25:07,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/4a092db6241842058f9a0a07579a29e3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/4a092db6241842058f9a0a07579a29e3 2024-11-20T22:25:07,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/9749af848e66429ebfc6a99cb0ed32d2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/9749af848e66429ebfc6a99cb0ed32d2 2024-11-20T22:25:07,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/bcf50c3453704785b0e18985f60ea35d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/bcf50c3453704785b0e18985f60ea35d 2024-11-20T22:25:07,986 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/646d2fe5c49b411d809e70091247259a to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/646d2fe5c49b411d809e70091247259a 2024-11-20T22:25:07,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d588eed3189440999d01ee2a942dd60 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0d588eed3189440999d01ee2a942dd60 2024-11-20T22:25:07,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/5a4579ddafaa48949ae31aae800d891f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/5a4579ddafaa48949ae31aae800d891f 2024-11-20T22:25:07,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ff6dfda31dc342a39fd2ce1b37b75010 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/ff6dfda31dc342a39fd2ce1b37b75010 2024-11-20T22:25:07,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a62854ebebc140f4b20445db64681bb3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/a62854ebebc140f4b20445db64681bb3 2024-11-20T22:25:07,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/40904a6239e24ad9908c2c0e2f1fdadc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/40904a6239e24ad9908c2c0e2f1fdadc 2024-11-20T22:25:07,994 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/d1754d65faf24e2a9854a74b3b913109 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/d1754d65faf24e2a9854a74b3b913109 2024-11-20T22:25:07,995 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/3eca7fcbabad4f79bd1c95c1e81bd208 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/3eca7fcbabad4f79bd1c95c1e81bd208 2024-11-20T22:25:07,997 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/fe560b9270b6455aa51e605e0ccc1c83 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/fe560b9270b6455aa51e605e0ccc1c83 2024-11-20T22:25:07,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e590a9c2026540488d3fdd2ad7adbdfa to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e590a9c2026540488d3fdd2ad7adbdfa 2024-11-20T22:25:07,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/93d3d968d29e4bcca0b46ce7c3c3a17e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/93d3d968d29e4bcca0b46ce7c3c3a17e 2024-11-20T22:25:08,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/73c857e6530841c49bdf465bc83263ad to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/73c857e6530841c49bdf465bc83263ad 2024-11-20T22:25:08,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/eab1c0d5ad3a40fab1c258980d874f48 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/eab1c0d5ad3a40fab1c258980d874f48 2024-11-20T22:25:08,011 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/50cfb2282f60471c9f1fcaa0b9951777 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/50cfb2282f60471c9f1fcaa0b9951777 2024-11-20T22:25:08,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e2758bcfee3f4189a7dec6ab5148b8a7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e2758bcfee3f4189a7dec6ab5148b8a7 2024-11-20T22:25:08,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e48222859eb245b5af55dc38fb23852e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e48222859eb245b5af55dc38fb23852e 2024-11-20T22:25:08,019 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d5866786c86f49d4936cf3f0c59c0718, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/7024baac5d1740fca4d95ff715bb54ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5554b7f91ad94d6fb2cc09a546011188, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ba55bad020d346edb3d220171c33aef6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/b0874c8060e448e287dc77234ea2754a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/49bb991089b34fb98823291d5fa75552, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4a2e1c70cc834023904a02534b42d6d1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/518c9df8771a4a18b04101edab4e1ec0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e1696c1cf7094319a3f80bba80ec6ee3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/78dd60cd99e3480f9b3604da4d56dbd4, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fec79bd8cea2410ba725ebd0d77e74e8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/57e4a0d1bf854806924b9d246355b2ab, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e8f59f256fa84ec094ce79f9e3799219, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ad35f59d091f4721b1989a82cf9f8afe, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5229f598ef33472882a2f00678adc7ab, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e4e4ee19b95149c7a72f91c2f5af647e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/1fd9fc65b5d54291a15610089d31b4f4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/18278b0fc0ed46e3982861aa6c266555, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/6c6b07b181654c3a98363a3e49138823, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/bb77dc10847141dc8855f97324eaa3b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/c789e6695f474a239d3922d575aa35f9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/39e42f113b9242199a15b08ef6f84505, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2ed08234110a4b308f1d5e663deba249, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/55ab26023d17415bbcf4a324e0724951, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2a5adb553adb456b8862980f792e8aae, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d8735216ead04bcab6098e83dce8aff8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/15f23a7b069a4816ad0d1cdb04777cff, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/58e08953d7094069b3c8e109cce8e442, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d1d56b6e0afd4c4d854d9a1a0c8d574d, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/28be9ee5200a477b9529278d8a7056c1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2bdd3e249ccc480ea1e302c68f456313, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fb38ed12412946c29c96dc0ff276a7e5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4b23adfaaf764664b29a20c74efb43a5] to archive 2024-11-20T22:25:08,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:08,023 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d5866786c86f49d4936cf3f0c59c0718 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d5866786c86f49d4936cf3f0c59c0718 2024-11-20T22:25:08,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/7024baac5d1740fca4d95ff715bb54ad to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/7024baac5d1740fca4d95ff715bb54ad 2024-11-20T22:25:08,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5554b7f91ad94d6fb2cc09a546011188 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5554b7f91ad94d6fb2cc09a546011188 2024-11-20T22:25:08,027 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ba55bad020d346edb3d220171c33aef6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ba55bad020d346edb3d220171c33aef6 2024-11-20T22:25:08,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/b0874c8060e448e287dc77234ea2754a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/b0874c8060e448e287dc77234ea2754a 2024-11-20T22:25:08,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/49bb991089b34fb98823291d5fa75552 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/49bb991089b34fb98823291d5fa75552 2024-11-20T22:25:08,033 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4a2e1c70cc834023904a02534b42d6d1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4a2e1c70cc834023904a02534b42d6d1 2024-11-20T22:25:08,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/518c9df8771a4a18b04101edab4e1ec0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/518c9df8771a4a18b04101edab4e1ec0 2024-11-20T22:25:08,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e1696c1cf7094319a3f80bba80ec6ee3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e1696c1cf7094319a3f80bba80ec6ee3 2024-11-20T22:25:08,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/78dd60cd99e3480f9b3604da4d56dbd4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/78dd60cd99e3480f9b3604da4d56dbd4 2024-11-20T22:25:08,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fec79bd8cea2410ba725ebd0d77e74e8 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fec79bd8cea2410ba725ebd0d77e74e8 2024-11-20T22:25:08,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/57e4a0d1bf854806924b9d246355b2ab to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/57e4a0d1bf854806924b9d246355b2ab 2024-11-20T22:25:08,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e8f59f256fa84ec094ce79f9e3799219 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e8f59f256fa84ec094ce79f9e3799219 2024-11-20T22:25:08,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ad35f59d091f4721b1989a82cf9f8afe to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/ad35f59d091f4721b1989a82cf9f8afe 2024-11-20T22:25:08,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5229f598ef33472882a2f00678adc7ab to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/5229f598ef33472882a2f00678adc7ab 2024-11-20T22:25:08,045 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e4e4ee19b95149c7a72f91c2f5af647e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e4e4ee19b95149c7a72f91c2f5af647e 2024-11-20T22:25:08,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/1fd9fc65b5d54291a15610089d31b4f4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/1fd9fc65b5d54291a15610089d31b4f4 2024-11-20T22:25:08,048 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/18278b0fc0ed46e3982861aa6c266555 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/18278b0fc0ed46e3982861aa6c266555 2024-11-20T22:25:08,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/6c6b07b181654c3a98363a3e49138823 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/6c6b07b181654c3a98363a3e49138823 2024-11-20T22:25:08,050 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/bb77dc10847141dc8855f97324eaa3b8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/bb77dc10847141dc8855f97324eaa3b8 2024-11-20T22:25:08,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/c789e6695f474a239d3922d575aa35f9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/c789e6695f474a239d3922d575aa35f9 2024-11-20T22:25:08,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/39e42f113b9242199a15b08ef6f84505 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/39e42f113b9242199a15b08ef6f84505 2024-11-20T22:25:08,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2ed08234110a4b308f1d5e663deba249 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2ed08234110a4b308f1d5e663deba249 2024-11-20T22:25:08,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/55ab26023d17415bbcf4a324e0724951 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/55ab26023d17415bbcf4a324e0724951 2024-11-20T22:25:08,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2a5adb553adb456b8862980f792e8aae to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2a5adb553adb456b8862980f792e8aae 2024-11-20T22:25:08,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d8735216ead04bcab6098e83dce8aff8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d8735216ead04bcab6098e83dce8aff8 2024-11-20T22:25:08,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/15f23a7b069a4816ad0d1cdb04777cff to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/15f23a7b069a4816ad0d1cdb04777cff 2024-11-20T22:25:08,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/58e08953d7094069b3c8e109cce8e442 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/58e08953d7094069b3c8e109cce8e442 2024-11-20T22:25:08,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d1d56b6e0afd4c4d854d9a1a0c8d574d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/d1d56b6e0afd4c4d854d9a1a0c8d574d 2024-11-20T22:25:08,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/28be9ee5200a477b9529278d8a7056c1 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/28be9ee5200a477b9529278d8a7056c1 2024-11-20T22:25:08,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2bdd3e249ccc480ea1e302c68f456313 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/2bdd3e249ccc480ea1e302c68f456313 2024-11-20T22:25:08,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fb38ed12412946c29c96dc0ff276a7e5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/fb38ed12412946c29c96dc0ff276a7e5 2024-11-20T22:25:08,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/9c4bd7f806ed4ceb9d1785bb3bd91c6b 2024-11-20T22:25:08,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4b23adfaaf764664b29a20c74efb43a5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/4b23adfaaf764664b29a20c74efb43a5 2024-11-20T22:25:08,075 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/recovered.edits/501.seqid, newMaxSeqId=501, maxSeqId=1 2024-11-20T22:25:08,077 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84. 
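The HFileArchiver records above all follow one pattern: each compacted store file under <root>/data/<namespace>/<table>/<region>/<family>/ is moved to the identical relative path under <root>/archive/data/... . A minimal sketch of that path mapping, using the Hadoop Path API; the helper name and structure are illustrative only, not HBase's internal archiver code:

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Re-roots a store file path under <rootDir>/archive, preserving its relative layout,
  // matching the "Archived from ... to ..." pairs in the log above.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/TestAcidGuarantees/<region>/<cf>/<file>
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72");
    Path store = new Path(root,
        "data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/507037353c5f4aa485c871ee27776012");
    // Prints the same archive location the first log record in this sequence reports.
    System.out.println(toArchivePath(root, store));
  }
}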
2024-11-20T22:25:08,077 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1635): Region close journal for 5f1e2878eea2034576ba469d1952fe84: 2024-11-20T22:25:08,079 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(170): Closed 5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:08,079 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=85 updating hbase:meta row=5f1e2878eea2034576ba469d1952fe84, regionState=CLOSED 2024-11-20T22:25:08,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T22:25:08,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; CloseRegionProcedure 5f1e2878eea2034576ba469d1952fe84, server=6365a1e51efd,46811,1732141422048 in 1.2330 sec 2024-11-20T22:25:08,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=84 2024-11-20T22:25:08,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=84, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f1e2878eea2034576ba469d1952fe84, UNASSIGN in 1.2450 sec 2024-11-20T22:25:08,085 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T22:25:08,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.2490 sec 2024-11-20T22:25:08,087 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141508086"}]},"ts":"1732141508086"} 2024-11-20T22:25:08,089 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:25:08,099 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:25:08,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.3520 sec 2024-11-20T22:25:08,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T22:25:08,861 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T22:25:08,861 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:25:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:08,862 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:08,863 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=87, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:08,864 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:25:08,866 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:08,868 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/recovered.edits] 2024-11-20T22:25:08,870 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/abaaf02b8b204276a4c58899f78740b1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/abaaf02b8b204276a4c58899f78740b1 2024-11-20T22:25:08,871 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/aee6db6e226d4798815998adc5efbe0c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/aee6db6e226d4798815998adc5efbe0c 2024-11-20T22:25:08,877 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ba17e182bd3c4b9aaa02c7709e8acb97 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/A/ba17e182bd3c4b9aaa02c7709e8acb97 2024-11-20T22:25:08,879 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0ede91e4fcfe4ad1a403a0d0afdc0171 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/0ede91e4fcfe4ad1a403a0d0afdc0171 2024-11-20T22:25:08,880 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc98f537fb0341b48639b424444c8d96 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/cc98f537fb0341b48639b424444c8d96 
2024-11-20T22:25:08,884 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e902dc0220a04123a10f098b2e460d1d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/B/e902dc0220a04123a10f098b2e460d1d 2024-11-20T22:25:08,887 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/a01b991ab0a94b9584cb0e9234136ac4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/a01b991ab0a94b9584cb0e9234136ac4 2024-11-20T22:25:08,892 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e81d0837b1b34e12864aed34d5c9248f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/C/e81d0837b1b34e12864aed34d5c9248f 2024-11-20T22:25:08,894 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/recovered.edits/501.seqid to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84/recovered.edits/501.seqid 2024-11-20T22:25:08,895 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/5f1e2878eea2034576ba469d1952fe84 2024-11-20T22:25:08,895 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:25:08,897 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=87, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:08,900 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:25:08,904 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:25:08,905 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=87, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:08,905 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-20T22:25:08,905 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141508905"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:08,908 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:25:08,908 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5f1e2878eea2034576ba469d1952fe84, NAME => 'TestAcidGuarantees,,1732141483962.5f1e2878eea2034576ba469d1952fe84.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:25:08,908 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:25:08,908 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141508908"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:08,912 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:25:08,952 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=87, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:08,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 91 msec 2024-11-20T22:25:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T22:25:08,966 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T22:25:08,988 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=239 (was 241), OpenFileDescriptor=455 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1099 (was 1118), ProcessCount=11 (was 11), AvailableMemoryMB=2129 (was 2130) 2024-11-20T22:25:09,000 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=1099, ProcessCount=11, AvailableMemoryMB=2128 2024-11-20T22:25:09,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T22:25:09,007 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:25:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:09,009 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:25:09,009 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:09,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 88 2024-11-20T22:25:09,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T22:25:09,010 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:25:09,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742195_1371 (size=963) 2024-11-20T22:25:09,069 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:25:09,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T22:25:09,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742196_1372 (size=53) 2024-11-20T22:25:09,162 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:25:09,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T22:25:09,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:09,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2f10f015cf26343913efd4f9264f4075, disabling compactions & flushes 2024-11-20T22:25:09,523 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:09,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:09,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. after waiting 0 ms 2024-11-20T22:25:09,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:09,523 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
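The descriptor logged above (table TestAcidGuarantees with the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and families A, B and C, each with VERSIONS => '1', BLOOMFILTER => 'ROW' and a 64 KB block size) can be reproduced from a client. A minimal sketch, assuming the standard HBase 2.x Java admin API rather than anything specific to this test harness:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // table-level metadata shown in the log: ADAPTIVE in-memory compaction
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)           // BLOCKSIZE => 64 KB
            .build());
      }
      admin.createTable(table.build());
    }
  }
}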
2024-11-20T22:25:09,524 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:09,526 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:25:09,528 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141509526"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141509526"}]},"ts":"1732141509526"} 2024-11-20T22:25:09,529 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T22:25:09,530 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:25:09,530 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141509530"}]},"ts":"1732141509530"} 2024-11-20T22:25:09,532 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:25:09,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, ASSIGN}] 2024-11-20T22:25:09,553 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, ASSIGN 2024-11-20T22:25:09,554 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:25:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T22:25:09,704 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:09,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; OpenRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:25:09,859 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:09,861 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:09,861 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7285): Opening region: {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:25:09,861 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,861 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:09,861 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7327): checking encryption for 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,861 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7330): checking classloading for 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,862 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,864 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:09,864 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f10f015cf26343913efd4f9264f4075 columnFamilyName A 2024-11-20T22:25:09,864 DEBUG [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:09,864 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(327): Store=2f10f015cf26343913efd4f9264f4075/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:09,864 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,865 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:09,865 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f10f015cf26343913efd4f9264f4075 columnFamilyName B 2024-11-20T22:25:09,865 DEBUG [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:09,866 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(327): Store=2f10f015cf26343913efd4f9264f4075/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:09,866 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,866 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:09,867 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f10f015cf26343913efd4f9264f4075 columnFamilyName C 2024-11-20T22:25:09,867 DEBUG [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:09,867 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(327): Store=2f10f015cf26343913efd4f9264f4075/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:09,867 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:09,868 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,868 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,869 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:25:09,870 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1085): writing seq id for 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:09,872 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:25:09,872 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1102): Opened 2f10f015cf26343913efd4f9264f4075; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75160115, jitterRate=0.11997298896312714}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:25:09,873 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1001): Region open journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:09,873 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., pid=90, masterSystemTime=1732141509859 2024-11-20T22:25:09,875 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:09,875 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
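The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region derives a per-family bound from the flush size divided by the number of families. If a test wanted that bound pinned explicitly, it can be written into the descriptor as a table attribute; a hedged sketch, with the 8 MB value invented purely for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  // Copies the live descriptor, adds the per-column-family flush lower bound
  // the log says is missing, and submits the change back to the master.
  static void setFlushLowerBound(Admin admin) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(8 * 1024 * 1024))   // 8 MB, illustrative only
        .build();
    admin.modifyTable(updated);   // runs a ModifyTableProcedure and reopens the table's regions
  }
}

admin.modifyTable follows the same modify-then-reopen sequence that appears further below for the MOB change.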
2024-11-20T22:25:09,875 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:09,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T22:25:09,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; OpenRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 in 170 msec 2024-11-20T22:25:09,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-11-20T22:25:09,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, ASSIGN in 326 msec 2024-11-20T22:25:09,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:25:09,878 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141509878"}]},"ts":"1732141509878"} 2024-11-20T22:25:09,879 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:25:09,885 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:25:09,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 878 msec 2024-11-20T22:25:10,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T22:25:10,114 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 88 completed 2024-11-20T22:25:10,115 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c777cbc to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8aa9dd8 2024-11-20T22:25:10,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@520d5c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,139 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,141 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,143 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:25:10,144 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47588, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:25:10,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:25:10,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:25:10,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:10,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742197_1373 (size=999) 2024-11-20T22:25:10,168 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T22:25:10,168 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T22:25:10,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:25:10,175 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, REOPEN/MOVE}] 2024-11-20T22:25:10,175 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, REOPEN/MOVE 2024-11-20T22:25:10,178 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,179 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:25:10,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:25:10,331 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,332 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,332 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:25:10,332 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 2f10f015cf26343913efd4f9264f4075, disabling compactions & flushes 2024-11-20T22:25:10,332 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,332 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,332 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. after waiting 0 ms 2024-11-20T22:25:10,332 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
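The modify request logged above (pid=91) changes only column family A, turning it into a MOB family with IS_MOB => 'true' and MOB_THRESHOLD => '4'; that is why the region is being closed here and reopened on the same server just below. A sketch of one way to make an equivalent change from a client, assuming an existing Admin handle on a recent 2.x build:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
  // Rewrites family 'A' with MOB enabled at a 4-byte threshold, matching the
  // attributes shown in the modify-table entry above.
  static void enableMobOnA(Admin admin) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    ColumnFamilyDescriptor a =
        admin.getDescriptor(name).getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)    // IS_MOB => 'true'
        .setMobThreshold(4L)    // MOB_THRESHOLD => '4'
        .build();
    admin.modifyColumnFamily(name, mobA);   // also a ModifyTableProcedure plus region reopen
  }
}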
2024-11-20T22:25:10,336 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T22:25:10,336 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,336 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:10,336 WARN [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionServer(3786): Not adding moved region record: 2f10f015cf26343913efd4f9264f4075 to self. 2024-11-20T22:25:10,338 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,338 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=CLOSED 2024-11-20T22:25:10,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T22:25:10,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 in 160 msec 2024-11-20T22:25:10,340 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, REOPEN/MOVE; state=CLOSED, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=true 2024-11-20T22:25:10,491 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=93, state=RUNNABLE; OpenRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:25:10,644 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,647 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:10,647 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7285): Opening region: {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:25:10,648 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,648 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:10,648 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7327): checking encryption for 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,648 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7330): checking classloading for 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,648 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,649 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:10,649 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f10f015cf26343913efd4f9264f4075 columnFamilyName A 2024-11-20T22:25:10,650 DEBUG [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:10,652 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(327): Store=2f10f015cf26343913efd4f9264f4075/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:10,653 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,653 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:10,654 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f10f015cf26343913efd4f9264f4075 columnFamilyName B 2024-11-20T22:25:10,654 DEBUG [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:10,654 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(327): Store=2f10f015cf26343913efd4f9264f4075/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:10,654 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,655 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:10,655 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f10f015cf26343913efd4f9264f4075 columnFamilyName C 2024-11-20T22:25:10,655 DEBUG [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:10,655 INFO [StoreOpener-2f10f015cf26343913efd4f9264f4075-1 {}] regionserver.HStore(327): Store=2f10f015cf26343913efd4f9264f4075/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:10,655 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,656 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,657 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,659 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:25:10,660 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1085): writing seq id for 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,661 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1102): Opened 2f10f015cf26343913efd4f9264f4075; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63444271, jitterRate=-0.054606691002845764}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:25:10,662 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1001): Region open journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:10,663 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., pid=95, masterSystemTime=1732141510644 2024-11-20T22:25:10,664 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=OPEN, openSeqNum=5, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,664 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,665 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
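Shortly after this reopen, the flush request and the writer connections further below start hitting RegionTooBusyException ("Over memstore limit=512.0 K"): the table descriptor carries the deliberately tiny 131072-byte MEMSTORE_FLUSHSIZE warned about above, so concurrent puts can outrun the flusher and checkResources rejects them until the flush catches up. The HBase client normally retries these rejections internally; purely for illustration, a hedged sketch of an explicit retry loop (row and value contents, attempt count, and backoff numbers are all made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException busy) {   // "Over memstore limit" as in the log
          if (attempt >= 5) throw busy;           // give up after a few tries (illustrative)
          Thread.sleep(100L * attempt);           // simple linear backoff
        }
      }
    }
  }
}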
2024-11-20T22:25:10,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-11-20T22:25:10,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; OpenRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 in 173 msec 2024-11-20T22:25:10,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-20T22:25:10,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, REOPEN/MOVE in 491 msec 2024-11-20T22:25:10,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T22:25:10,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 495 msec 2024-11-20T22:25:10,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 523 msec 2024-11-20T22:25:10,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T22:25:10,673 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40f02431 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ce825aa 2024-11-20T22:25:10,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f657539, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,723 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40195d2e to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d01f5f2 2024-11-20T22:25:10,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@297987d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b466e6f to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79faa890 2024-11-20T22:25:10,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cd8d74e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,753 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4077c593 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c654b25 2024-11-20T22:25:10,767 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36dd8340, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,768 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07028425 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4178cb83 2024-11-20T22:25:10,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be5976f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,787 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a7ac296 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d936a59 2024-11-20T22:25:10,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e28adb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,801 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7664cff8 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1043a2cf 2024-11-20T22:25:10,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b2dc848, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,811 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x375e0d48 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a8684b6 2024-11-20T22:25:10,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f3c7426, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,819 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67bf566d to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6992cd18 2024-11-20T22:25:10,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65830948, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,827 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39448763 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@46c1775e 2024-11-20T22:25:10,835 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b75ff87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:10,837 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:10,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-11-20T22:25:10,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T22:25:10,839 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:10,839 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:10,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:10,843 DEBUG [hconnection-0x41cbba61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,843 DEBUG [hconnection-0x450c0712-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,844 DEBUG [hconnection-0x3393d3c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,845 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,845 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,845 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54442, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,848 DEBUG [hconnection-0x66deb3d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,849 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,851 DEBUG [hconnection-0x30526586-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,853 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54474, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:10,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:10,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:10,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:10,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:10,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:10,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:10,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:10,863 DEBUG [hconnection-0x204de444-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,864 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141570869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141570870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141570871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,875 DEBUG [hconnection-0x5c00c24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,876 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141570877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,887 DEBUG [hconnection-0x7d5c4ec4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,888 DEBUG [hconnection-0x50c135b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,888 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,889 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,891 DEBUG [hconnection-0x54d59e25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:10,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141570891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,892 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:10,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d08f9eb6c52c46768312bcd565db2045_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141510846/Put/seqid=0 2024-11-20T22:25:10,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T22:25:10,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141570971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141570972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141570972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141570979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742198_1374 (size=12154) 2024-11-20T22:25:10,990 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:10,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:10,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:10,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:10,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:10,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:10,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:10,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:10,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141570993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T22:25:11,143 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:11,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:11,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:11,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141571175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141571176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141571177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141571181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141571195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,384 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:11,388 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d08f9eb6c52c46768312bcd565db2045_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d08f9eb6c52c46768312bcd565db2045_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:11,399 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f570b0331a374ffa9d5de990b34e506f, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:11,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f570b0331a374ffa9d5de990b34e506f is 175, key is test_row_0/A:col10/1732141510846/Put/seqid=0 2024-11-20T22:25:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T22:25:11,468 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:11,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:11,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742199_1375 (size=30955) 2024-11-20T22:25:11,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141571481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141571481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141571481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141571485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141571501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:11,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:11,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:11,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:11,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,871 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f570b0331a374ffa9d5de990b34e506f 2024-11-20T22:25:11,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/95db2d6419724d57b92abd773d7bdb05 is 50, key is test_row_0/B:col10/1732141510846/Put/seqid=0 2024-11-20T22:25:11,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T22:25:11,950 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:11,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
as already flushing 2024-11-20T22:25:11,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:11,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:11,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742200_1376 (size=12001) 2024-11-20T22:25:11,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141571983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141571983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141571989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:11,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:11,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141571989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:12,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:12,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141572015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:12,103 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:12,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:12,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:12,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:12,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:12,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:12,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:12,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:12,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:12,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:12,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/95db2d6419724d57b92abd773d7bdb05 2024-11-20T22:25:12,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/e4b3ea594e8040f799d91a1646c8a929 is 50, key is test_row_0/C:col10/1732141510846/Put/seqid=0 2024-11-20T22:25:12,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:12,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:12,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:12,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:12,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
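Note on the entries above: the repeated RegionTooBusyException warnings come from HRegion.checkResources() rejecting client puts while the region's memstore is over its blocking limit (512.0 K here), and the HBase client normally retries such writes internally before surfacing an error. The following is a minimal, hedged sketch of how a writer against this table might tune that retry behavior and detect the exception when retries run out; the class name, retry values, and cell contents are illustrative and are not taken from this log.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: allow more retries / backoff so the server has time
    // to finish flushing before the client gives up.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 200L);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // RegionTooBusyException is retried internally by the client.
        table.put(put);
      } catch (IOException e) {
        // If retries run out, the cause chain usually carries the server's
        // RegionTooBusyException ("Over memstore limit=...") seen in the log above.
        boolean tooBusy = e instanceof RegionTooBusyException
            || e.getCause() instanceof RegionTooBusyException;
        System.err.println("Write failed (region too busy: " + tooBusy + "): " + e.getMessage());
      }
    }
  }
}
```

In a load test like this one, backing off and retrying is usually preferable to failing the write, since the exception clears as soon as the MemStoreFlusher drains the region below the blocking limit.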
2024-11-20T22:25:12,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:12,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742201_1377 (size=12001) 2024-11-20T22:25:12,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/e4b3ea594e8040f799d91a1646c8a929 2024-11-20T22:25:12,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f570b0331a374ffa9d5de990b34e506f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f 2024-11-20T22:25:12,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f, entries=150, sequenceid=17, filesize=30.2 K 2024-11-20T22:25:12,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/95db2d6419724d57b92abd773d7bdb05 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/95db2d6419724d57b92abd773d7bdb05 2024-11-20T22:25:12,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/95db2d6419724d57b92abd773d7bdb05, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T22:25:12,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/e4b3ea594e8040f799d91a1646c8a929 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/e4b3ea594e8040f799d91a1646c8a929 2024-11-20T22:25:12,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/e4b3ea594e8040f799d91a1646c8a929, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T22:25:12,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 2f10f015cf26343913efd4f9264f4075 in 1629ms, sequenceid=17, compaction requested=false 2024-11-20T22:25:12,484 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:25:12,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:12,569 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:12,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T22:25:12,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
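For context on the flush traffic above: the pid=97 FlushRegionProcedure is the master-driven counterpart of an administrative flush request, and it keeps being rejected with "NOT flushing ... as already flushing" until the MemStoreFlusher's own flush (sequenceid=17, committed just above) completes. Below is a minimal sketch, under stated assumptions, of issuing the same kind of flush through the Admin API; the class name is hypothetical, and the memstore settings mentioned in the comments are illustrative examples of how a 512.0 K blocking limit can arise, not values read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    // Region-server-side settings (hbase-site.xml), shown only for context:
    //   hbase.hregion.memstore.flush.size       e.g. 131072 (128 K)
    //   hbase.hregion.memstore.block.multiplier e.g. 4 (the default)
    // flush.size * multiplier gives the blocking limit logged above as
    // "Over memstore limit=512.0 K" (illustrative values, not taken from this log).
    Configuration conf = HBaseConfiguration.create();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master schedules a FlushRegionProcedure per region (like pid=97 in
      // this log); the call returns once the flush procedures have finished.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The "already flushing" rejections are expected while a flush is in flight; the procedure framework simply re-dispatches the callable until it succeeds, which is what the later pid=97 entries in this log show.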
2024-11-20T22:25:12,570 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:12,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:12,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:12,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:12,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:12,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:12,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:12,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209a9880b4887d40178b8d099e636f68bb_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141510869/Put/seqid=0 2024-11-20T22:25:12,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742202_1378 (size=12154) 2024-11-20T22:25:12,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,598 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209a9880b4887d40178b8d099e636f68bb_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209a9880b4887d40178b8d099e636f68bb_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:12,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/6f10171f76ee4e5ab086be66a7a41ef0, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:12,600 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/6f10171f76ee4e5ab086be66a7a41ef0 is 175, key is test_row_0/A:col10/1732141510869/Put/seqid=0
2024-11-20T22:25:12,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742203_1379 (size=30955)
2024-11-20T22:25:12,641 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/6f10171f76ee4e5ab086be66a7a41ef0
2024-11-20T22:25:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/12a91dd2903a414e9fe98b71cd5cb409 is 50, key is test_row_0/B:col10/1732141510869/Put/seqid=0
2024-11-20T22:25:12,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742204_1380 (size=12001)
2024-11-20T22:25:12,699 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/12a91dd2903a414e9fe98b71cd5cb409
2024-11-20T22:25:12,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/092577cca0c941908f61128a39999a79 is 50, key is test_row_0/C:col10/1732141510869/Put/seqid=0
2024-11-20T22:25:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742205_1381 (size=12001)
2024-11-20T22:25:12,747 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/092577cca0c941908f61128a39999a79
2024-11-20T22:25:12,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/6f10171f76ee4e5ab086be66a7a41ef0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0
2024-11-20T22:25:12,761 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0, entries=150, sequenceid=42, filesize=30.2 K
2024-11-20T22:25:12,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/12a91dd2903a414e9fe98b71cd5cb409 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/12a91dd2903a414e9fe98b71cd5cb409
2024-11-20T22:25:12,772 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/12a91dd2903a414e9fe98b71cd5cb409, entries=150, sequenceid=42, filesize=11.7 K
2024-11-20T22:25:12,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/092577cca0c941908f61128a39999a79 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/092577cca0c941908f61128a39999a79
2024-11-20T22:25:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,779 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/092577cca0c941908f61128a39999a79, entries=150, sequenceid=42, filesize=11.7 K
2024-11-20T22:25:12,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,780 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 2f10f015cf26343913efd4f9264f4075 in 210ms, sequenceid=42, compaction requested=false
2024-11-20T22:25:12,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075:
2024-11-20T22:25:12,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.
2024-11-20T22:25:12,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97
2024-11-20T22:25:12,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=97
2024-11-20T22:25:12,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96
2024-11-20T22:25:12,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9430 sec
2024-11-20T22:25:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 1.9480 sec
2024-11-20T22:25:12,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,788 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,791 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,796 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,800 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,802 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,811 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,815 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,825 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,832 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,839 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,843 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,846 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,851 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,855 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,865 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,869 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,876 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,886 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,895 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,904 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,910 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,915 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,918 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,922 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,927 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,936 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96
2024-11-20T22:25:12,944 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed
2024-11-20T22:25:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,946 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:25:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees
2024-11-20T22:25:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98
2024-11-20T22:25:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,950 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:25:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,951 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:25:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:25:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T22:25:13,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,103 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T22:25:13,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:13,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:13,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-11-20T22:25:13,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-11-20T22:25:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-20T22:25:13,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 157 msec 2024-11-20T22:25:13,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 164 msec 2024-11-20T22:25:13,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T22:25:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,258 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-20T22:25:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,259 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,264 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:13,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-20T22:25:13,271 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:13,272 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120967f7f42f592425fbd4faaab57f7a05b_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:13,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,296 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,313 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,319 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742206_1382 (size=17034) 2024-11-20T22:25:13,327 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:13,335 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120967f7f42f592425fbd4faaab57f7a05b_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120967f7f42f592425fbd4faaab57f7a05b_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:13,342 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/ac55b983def940df9aba05a7fb944c22, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:13,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/ac55b983def940df9aba05a7fb944c22 is 175, key is test_row_0/A:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:13,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:13,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141573365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141573366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141573367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141573367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141573369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742207_1383 (size=48139) 2024-11-20T22:25:13,407 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/ac55b983def940df9aba05a7fb944c22 2024-11-20T22:25:13,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a8f88efb087e400ab8416c2a50c88539 is 50, key is test_row_0/B:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:13,424 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:13,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
as already flushing 2024-11-20T22:25:13,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742208_1384 (size=12001) 2024-11-20T22:25:13,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141573476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141573476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141573482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141573482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141573483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:13,576 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,648 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:25:13,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141573685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141573686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141573688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141573689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141573689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,729 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:13,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:13,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a8f88efb087e400ab8416c2a50c88539 2024-11-20T22:25:13,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:13,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/68fe8863067044e2b5690368c23c12ca is 50, key is test_row_0/C:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:13,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:13,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
as already flushing 2024-11-20T22:25:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:13,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:13,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742209_1385 (size=12001) 2024-11-20T22:25:13,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141573992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141573991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141573998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:13,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141573998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141573999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:14,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:14,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:14,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:14,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:14,195 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:14,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:14,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:14,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:14,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:14,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/68fe8863067044e2b5690368c23c12ca 2024-11-20T22:25:14,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/ac55b983def940df9aba05a7fb944c22 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22 2024-11-20T22:25:14,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22, entries=250, sequenceid=54, filesize=47.0 K 2024-11-20T22:25:14,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a8f88efb087e400ab8416c2a50c88539 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a8f88efb087e400ab8416c2a50c88539 2024-11-20T22:25:14,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a8f88efb087e400ab8416c2a50c88539, entries=150, sequenceid=54, 
filesize=11.7 K 2024-11-20T22:25:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/68fe8863067044e2b5690368c23c12ca as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/68fe8863067044e2b5690368c23c12ca 2024-11-20T22:25:14,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/68fe8863067044e2b5690368c23c12ca, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T22:25:14,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2f10f015cf26343913efd4f9264f4075 in 1084ms, sequenceid=54, compaction requested=true 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:14,349 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:14,349 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:14,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T22:25:14,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:14,350 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:14,350 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:14,350 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:14,350 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:14,350 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=107.5 K 2024-11-20T22:25:14,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:14,351 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:14,351 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22] 2024-11-20T22:25:14,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:14,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:14,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:14,351 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:14,351 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor compaction (all files) 2024-11-20T22:25:14,351 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:14,351 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/95db2d6419724d57b92abd773d7bdb05, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/12a91dd2903a414e9fe98b71cd5cb409, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a8f88efb087e400ab8416c2a50c88539] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.2 K 2024-11-20T22:25:14,351 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f570b0331a374ffa9d5de990b34e506f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732141510846 2024-11-20T22:25:14,351 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 95db2d6419724d57b92abd773d7bdb05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732141510846 2024-11-20T22:25:14,352 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f10171f76ee4e5ab086be66a7a41ef0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141510867 2024-11-20T22:25:14,352 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 12a91dd2903a414e9fe98b71cd5cb409, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141510867 2024-11-20T22:25:14,352 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac55b983def940df9aba05a7fb944c22, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141513161 2024-11-20T22:25:14,352 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a8f88efb087e400ab8416c2a50c88539, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141513161 2024-11-20T22:25:14,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120649f5bea536b4727b800a9af4bbb8b3d_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141513367/Put/seqid=0 2024-11-20T22:25:14,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:14,385 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#B#compaction#325 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:14,386 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/340712cf9b72405187938d72aaefd634 is 50, key is test_row_0/B:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:14,387 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:14,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742210_1386 (size=12154) 2024-11-20T22:25:14,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:14,414 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112029f14e0e36d14bd88c2c9eb5cb4b96d8_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:14,417 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112029f14e0e36d14bd88c2c9eb5cb4b96d8_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:14,417 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120649f5bea536b4727b800a9af4bbb8b3d_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120649f5bea536b4727b800a9af4bbb8b3d_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:14,417 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112029f14e0e36d14bd88c2c9eb5cb4b96d8_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:14,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/54b1e87320184af2afd728a37914eb04, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:14,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/54b1e87320184af2afd728a37914eb04 is 175, key is test_row_0/A:col10/1732141513367/Put/seqid=0 2024-11-20T22:25:14,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742213_1389 (size=30955) 2024-11-20T22:25:14,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742211_1387 (size=12104) 2024-11-20T22:25:14,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742212_1388 (size=4469) 2024-11-20T22:25:14,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:14,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:14,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141574516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141574516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141574522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141574525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141574526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141574632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141574632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141574643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141574643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141574644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141574837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141574839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141574849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141574852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:14,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141574852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:14,879 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/54b1e87320184af2afd728a37914eb04 2024-11-20T22:25:14,886 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#326 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:14,886 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/68cd797a3ac6467d90d6e11fb0da4566 is 175, key is test_row_0/A:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:14,904 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/340712cf9b72405187938d72aaefd634 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/340712cf9b72405187938d72aaefd634 2024-11-20T22:25:14,910 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into 340712cf9b72405187938d72aaefd634(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:14,910 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:14,910 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=13, startTime=1732141514349; duration=0sec 2024-11-20T22:25:14,910 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:14,910 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:14,911 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:14,912 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:14,912 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:14,912 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:14,912 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/e4b3ea594e8040f799d91a1646c8a929, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/092577cca0c941908f61128a39999a79, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/68fe8863067044e2b5690368c23c12ca] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.2 K 2024-11-20T22:25:14,912 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e4b3ea594e8040f799d91a1646c8a929, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732141510846 2024-11-20T22:25:14,912 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 092577cca0c941908f61128a39999a79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732141510867 2024-11-20T22:25:14,913 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 68fe8863067044e2b5690368c23c12ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141513161 2024-11-20T22:25:14,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is 
added to blk_1073742214_1390 (size=31058) 2024-11-20T22:25:14,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a94f044e2ddc4a719e970236bd4ba38b is 50, key is test_row_0/B:col10/1732141513367/Put/seqid=0 2024-11-20T22:25:14,948 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#C#compaction#328 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:14,949 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b2e09096ee8f437783d4518a7605eeb2 is 50, key is test_row_0/C:col10/1732141513161/Put/seqid=0 2024-11-20T22:25:14,956 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/68cd797a3ac6467d90d6e11fb0da4566 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/68cd797a3ac6467d90d6e11fb0da4566 2024-11-20T22:25:14,971 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into 68cd797a3ac6467d90d6e11fb0da4566(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:14,971 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:14,971 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=13, startTime=1732141514349; duration=0sec 2024-11-20T22:25:14,971 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:14,971 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:14,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742215_1391 (size=12001) 2024-11-20T22:25:14,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742216_1392 (size=12104) 2024-11-20T22:25:14,999 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b2e09096ee8f437783d4518a7605eeb2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b2e09096ee8f437783d4518a7605eeb2 2024-11-20T22:25:15,007 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into b2e09096ee8f437783d4518a7605eeb2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:15,007 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:15,007 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=13, startTime=1732141514349; duration=0sec 2024-11-20T22:25:15,007 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:15,007 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:15,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141575142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141575150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141575157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141575161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141575165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,380 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a94f044e2ddc4a719e970236bd4ba38b 2024-11-20T22:25:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:15,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/abe509720d9648098642529de58a700f is 50, key is test_row_0/C:col10/1732141513367/Put/seqid=0 2024-11-20T22:25:15,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742217_1393 (size=12001) 2024-11-20T22:25:15,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141575648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141575656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141575669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141575672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141575674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:15,859 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/abe509720d9648098642529de58a700f 2024-11-20T22:25:15,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/54b1e87320184af2afd728a37914eb04 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04 2024-11-20T22:25:15,955 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04, entries=150, sequenceid=79, filesize=30.2 K 2024-11-20T22:25:15,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a94f044e2ddc4a719e970236bd4ba38b as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a94f044e2ddc4a719e970236bd4ba38b 2024-11-20T22:25:15,972 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a94f044e2ddc4a719e970236bd4ba38b, entries=150, sequenceid=79, filesize=11.7 K 2024-11-20T22:25:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T22:25:15,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/abe509720d9648098642529de58a700f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/abe509720d9648098642529de58a700f 2024-11-20T22:25:15,985 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/abe509720d9648098642529de58a700f, entries=150, sequenceid=79, filesize=11.7 K 2024-11-20T22:25:15,986 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2f10f015cf26343913efd4f9264f4075 in 1636ms, sequenceid=79, compaction requested=false 2024-11-20T22:25:15,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:15,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:15,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-20T22:25:15,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-20T22:25:16,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-20T22:25:16,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7300 sec 2024-11-20T22:25:16,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 2.7400 sec 2024-11-20T22:25:16,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:25:16,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:16,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:16,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:16,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:16,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:16,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:16,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:16,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112052c456df70164a4688f467ca5869c375_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742218_1394 (size=14594) 2024-11-20T22:25:16,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141576759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141576759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141576769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141576770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141576774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141576872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141576872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141576872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141576874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:16,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:16,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141576882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141577083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141577083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141577086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141577087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141577091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,139 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:17,157 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112052c456df70164a4688f467ca5869c375_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052c456df70164a4688f467ca5869c375_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:17,158 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b564f774f9b541cdb28ad7d59e8cb9d1, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:17,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b564f774f9b541cdb28ad7d59e8cb9d1 is 175, key is test_row_0/A:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:17,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742219_1395 (size=39549) 2024-11-20T22:25:17,171 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 
K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b564f774f9b541cdb28ad7d59e8cb9d1 2024-11-20T22:25:17,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/d517e258f8e94f3f8e435dedc61b60c7 is 50, key is test_row_0/B:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:17,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742220_1396 (size=12001) 2024-11-20T22:25:17,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T22:25:17,383 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-11-20T22:25:17,388 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:17,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-20T22:25:17,389 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:25:17,390 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:17,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:17,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141577390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141577392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141577397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141577399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141577408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:25:17,543 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:25:17,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:17,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:17,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:17,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/d517e258f8e94f3f8e435dedc61b60c7 2024-11-20T22:25:17,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/81b2cd9c005e4741a544933bf09be970 is 50, key is test_row_0/C:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:17,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742221_1397 (size=12001) 2024-11-20T22:25:17,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:25:17,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:25:17,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:17,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
as already flushing 2024-11-20T22:25:17,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:17,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,849 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:25:17,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:17,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:17,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:17,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:17,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141577895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141577903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141577906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141577911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:17,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141577917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:25:18,002 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:18,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:25:18,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:18,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:18,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:18,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:18,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:18,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:18,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/81b2cd9c005e4741a544933bf09be970 2024-11-20T22:25:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b564f774f9b541cdb28ad7d59e8cb9d1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1 2024-11-20T22:25:18,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1, entries=200, sequenceid=94, filesize=38.6 K 2024-11-20T22:25:18,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/d517e258f8e94f3f8e435dedc61b60c7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/d517e258f8e94f3f8e435dedc61b60c7 2024-11-20T22:25:18,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/d517e258f8e94f3f8e435dedc61b60c7, entries=150, sequenceid=94, 
filesize=11.7 K 2024-11-20T22:25:18,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/81b2cd9c005e4741a544933bf09be970 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/81b2cd9c005e4741a544933bf09be970 2024-11-20T22:25:18,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/81b2cd9c005e4741a544933bf09be970, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T22:25:18,122 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T22:25:18,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2f10f015cf26343913efd4f9264f4075 in 1456ms, sequenceid=94, compaction requested=true 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:18,123 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:18,124 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:18,124 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:18,124 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
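
Editor's note on the selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", followed by ExploringCompactionPolicy picking all three files): these appear to line up with the usual store-file compaction thresholds, where three flushed HFiles reach the minor-compaction minimum and 16 is the blocking store-file count. A minimal sketch of the configuration keys involved, shown with their stock default values rather than anything read from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum number of eligible HFiles before a minor compaction is selected
        // (the selection above fires once 3 files are eligible).
        conf.setInt("hbase.hstore.compaction.min", 3);

        // Upper bound on the number of files folded into a single compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Writes to a store with this many files are delayed until compaction
        // catches up (the "16 blocking" figure in the selection line).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }

Lowering hbase.hstore.compaction.min would trigger selection after fewer flushes; raising hbase.hstore.blockingStoreFiles would let more files accumulate before writes are delayed.
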
2024-11-20T22:25:18,124 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/68cd797a3ac6467d90d6e11fb0da4566, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=99.2 K 2024-11-20T22:25:18,124 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:18,124 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/68cd797a3ac6467d90d6e11fb0da4566, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1] 2024-11-20T22:25:18,125 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:18,125 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68cd797a3ac6467d90d6e11fb0da4566, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141513161 2024-11-20T22:25:18,125 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54b1e87320184af2afd728a37914eb04, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732141513362 2024-11-20T22:25:18,126 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b564f774f9b541cdb28ad7d59e8cb9d1, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732141514516 2024-11-20T22:25:18,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:18,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor compaction (all files) 2024-11-20T22:25:18,136 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:18,136 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/340712cf9b72405187938d72aaefd634, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a94f044e2ddc4a719e970236bd4ba38b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/d517e258f8e94f3f8e435dedc61b60c7] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.3 K 2024-11-20T22:25:18,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 340712cf9b72405187938d72aaefd634, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141513161 2024-11-20T22:25:18,137 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a94f044e2ddc4a719e970236bd4ba38b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732141513362 2024-11-20T22:25:18,137 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d517e258f8e94f3f8e435dedc61b60c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732141514516 2024-11-20T22:25:18,153 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#B#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:18,153 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/4880bd7b53fa43e2b616f0b766bf490b is 50, key is test_row_0/B:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:18,155 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:18,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T22:25:18,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
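
Editor's note: pid=103 above is a flush procedure that the master keeps re-dispatching (RSProcedureDispatcher) while it polls pid=102 for completion; the earlier attempts were rejected with "NOT flushing ... as already flushing", and only now does the region server accept the work. In this 2.7.0-SNAPSHOT build the flush evidently runs as a master procedure; from a client or test, the equivalent request would normally go through the Admin API. A minimal sketch, assuming a reachable cluster configuration, with nothing taken from the log beyond the table name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Request a flush of every region of the table; in this build the
          // request is carried out server-side as the kind of flush procedure
          // visible above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
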
2024-11-20T22:25:18,156 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:18,171 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:18,184 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120608fa74c3d4f4e31bb13f7b47d70003e_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:18,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742222_1398 (size=12207) 2024-11-20T22:25:18,188 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120608fa74c3d4f4e31bb13f7b47d70003e_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:18,188 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120608fa74c3d4f4e31bb13f7b47d70003e_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:18,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4d9bbb7fff144d48fe5a76e0c56a9a0_2f10f015cf26343913efd4f9264f4075 is 50, key is 
test_row_0/A:col10/1732141516713/Put/seqid=0 2024-11-20T22:25:18,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742223_1399 (size=4469) 2024-11-20T22:25:18,206 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#334 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:18,206 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c277024ff13248b2a0f221939aeae81b is 175, key is test_row_0/A:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:18,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742224_1400 (size=12154) 2024-11-20T22:25:18,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:18,212 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4d9bbb7fff144d48fe5a76e0c56a9a0_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4d9bbb7fff144d48fe5a76e0c56a9a0_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:18,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/01940ff525ed40db85755e69c48cc29c, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:18,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/01940ff525ed40db85755e69c48cc29c is 175, key is test_row_0/A:col10/1732141516713/Put/seqid=0 2024-11-20T22:25:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742225_1401 (size=31161) 2024-11-20T22:25:18,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742226_1402 (size=30955) 2024-11-20T22:25:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:25:18,590 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/4880bd7b53fa43e2b616f0b766bf490b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/4880bd7b53fa43e2b616f0b766bf490b 2024-11-20T22:25:18,594 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into 4880bd7b53fa43e2b616f0b766bf490b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:18,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:18,594 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=13, startTime=1732141518123; duration=0sec 2024-11-20T22:25:18,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:18,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:18,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:18,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:18,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:18,595 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
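
Editor's note on the MOB lines a little earlier (DefaultMobStoreCompactor, "New MOB writer created=...", then "Aborting writer ... because there are no MOB cells"): they indicate that column family A is MOB-enabled in this test variant, while every flushed cell stays under the MOB threshold, so the side MOB file is discarded. A minimal sketch of how such a family is declared; the threshold value is illustrative, not read from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static TableDescriptor mobTable() {
        // Cells larger than the threshold are written to MOB files; smaller
        // cells (as in this log, where the MOB writer is aborted) stay in
        // ordinary HFiles.
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024) // illustrative threshold: 100 KB
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(mobFamily)
            .build();
      }

      public static void main(String[] args) {
        System.out.println(mobTable());
      }
    }

With a threshold like this, the 50- to 175-byte cells reported in the "Len of the biggest cell" lines above would never qualify as MOB, which matches the aborted writer.
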
2024-11-20T22:25:18,595 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b2e09096ee8f437783d4518a7605eeb2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/abe509720d9648098642529de58a700f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/81b2cd9c005e4741a544933bf09be970] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.3 K 2024-11-20T22:25:18,595 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b2e09096ee8f437783d4518a7605eeb2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732141513161 2024-11-20T22:25:18,595 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting abe509720d9648098642529de58a700f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732141513362 2024-11-20T22:25:18,595 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 81b2cd9c005e4741a544933bf09be970, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732141514516 2024-11-20T22:25:18,605 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#C#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:18,606 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/fa2765e5d41147fc9af083695fb409e2 is 50, key is test_row_0/C:col10/1732141514521/Put/seqid=0 2024-11-20T22:25:18,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742227_1403 (size=12207) 2024-11-20T22:25:18,620 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c277024ff13248b2a0f221939aeae81b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c277024ff13248b2a0f221939aeae81b 2024-11-20T22:25:18,622 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/01940ff525ed40db85755e69c48cc29c 2024-11-20T22:25:18,626 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into c277024ff13248b2a0f221939aeae81b(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
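
Editor's note on the RegionTooBusyException warnings surrounding this stretch: they come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K figure suggests the test shrinks the flush size on purpose to force this path. The exception is retryable, and the standard client retries it on its own (governed by hbase.client.retries.number and hbase.client.pause); the sketch below only illustrates the idea with an explicit backoff loop around a single put. Row, family and qualifier mirror the test table; everything else is assumed:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put); // may be rejected while the memstore is over its blocking limit
              break;
            } catch (IOException e) {
              // The busy region may surface directly or wrapped by the client's retry machinery.
              boolean busy = e instanceof RegionTooBusyException
                  || e.getCause() instanceof RegionTooBusyException;
              if (!busy || attempt == 10) {
                throw e; // not a busy-region rejection, or out of attempts
              }
              Thread.sleep(backoffMs); // back off until flush/compaction drains the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }

Doubling the pause after each rejection gives the flush and compaction activity logged above time to bring the memstore back under its limit.
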
2024-11-20T22:25:18,626 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:18,626 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=13, startTime=1732141518123; duration=0sec 2024-11-20T22:25:18,626 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:18,626 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:18,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a41b1a67d53146c199d6d3f351fae93d is 50, key is test_row_0/B:col10/1732141516713/Put/seqid=0 2024-11-20T22:25:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742228_1404 (size=12001) 2024-11-20T22:25:18,649 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a41b1a67d53146c199d6d3f351fae93d 2024-11-20T22:25:18,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/101f7056daf549acb4cf9a5f4101fba0 is 50, key is test_row_0/C:col10/1732141516713/Put/seqid=0 2024-11-20T22:25:18,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742229_1405 (size=12001) 2024-11-20T22:25:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:18,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:18,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:18,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141578928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141578928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:18,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141578935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:18,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141578937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:18,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141578937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,023 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/fa2765e5d41147fc9af083695fb409e2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fa2765e5d41147fc9af083695fb409e2 2024-11-20T22:25:19,029 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into fa2765e5d41147fc9af083695fb409e2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:19,029 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:19,029 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=13, startTime=1732141518123; duration=0sec 2024-11-20T22:25:19,029 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:19,029 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:19,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141579039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141579039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141579039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141579045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141579045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,071 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/101f7056daf549acb4cf9a5f4101fba0 2024-11-20T22:25:19,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/01940ff525ed40db85755e69c48cc29c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c 2024-11-20T22:25:19,086 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c, entries=150, sequenceid=118, filesize=30.2 K 2024-11-20T22:25:19,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a41b1a67d53146c199d6d3f351fae93d as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a41b1a67d53146c199d6d3f351fae93d 2024-11-20T22:25:19,093 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a41b1a67d53146c199d6d3f351fae93d, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:25:19,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/101f7056daf549acb4cf9a5f4101fba0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/101f7056daf549acb4cf9a5f4101fba0 2024-11-20T22:25:19,099 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/101f7056daf549acb4cf9a5f4101fba0, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T22:25:19,101 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 2f10f015cf26343913efd4f9264f4075 in 945ms, sequenceid=118, compaction requested=false 2024-11-20T22:25:19,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:19,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:19,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-20T22:25:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-20T22:25:19,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T22:25:19,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7130 sec 2024-11-20T22:25:19,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.7220 sec 2024-11-20T22:25:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:19,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:19,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112015f0f098b6674f86a40ecb6a27b078c6_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:19,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742230_1406 (size=14744) 2024-11-20T22:25:19,292 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:19,296 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112015f0f098b6674f86a40ecb6a27b078c6_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112015f0f098b6674f86a40ecb6a27b078c6_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:19,297 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fb52741587f34b73b2b8808848571214, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:19,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fb52741587f34b73b2b8808848571214 is 175, key is test_row_0/A:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742231_1407 (size=39699) 2024-11-20T22:25:19,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141579321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141579335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141579337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141579337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141579339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141579446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141579447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141579447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141579450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141579450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T22:25:19,494 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-20T22:25:19,495 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:19,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T22:25:19,500 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:19,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:19,500 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:19,500 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:19,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:19,654 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:25:19,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141579652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141579652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:19,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:19,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:19,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141579652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141579658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141579660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,705 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fb52741587f34b73b2b8808848571214 2024-11-20T22:25:19,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/ee0c4892408047b8a34526b2c0935f18 is 50, key is test_row_0/B:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742232_1408 (size=12151) 2024-11-20T22:25:19,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/ee0c4892408047b8a34526b2c0935f18 2024-11-20T22:25:19,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b1410ec42af24a039e601812b77b77b9 is 50, key is test_row_0/C:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:19,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742233_1409 (size=12151) 2024-11-20T22:25:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:19,808 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:25:19,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:19,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:19,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:19,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:19,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141579959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141579963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:25:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:19,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:19,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:19,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:19,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141579971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141579975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:19,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:19,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141579976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:20,126 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:25:20,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:20,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:20,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:20,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:20,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b1410ec42af24a039e601812b77b77b9 2024-11-20T22:25:20,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fb52741587f34b73b2b8808848571214 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214 2024-11-20T22:25:20,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214, entries=200, sequenceid=134, filesize=38.8 K 2024-11-20T22:25:20,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/ee0c4892408047b8a34526b2c0935f18 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/ee0c4892408047b8a34526b2c0935f18 2024-11-20T22:25:20,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/ee0c4892408047b8a34526b2c0935f18, entries=150, sequenceid=134, filesize=11.9 K 2024-11-20T22:25:20,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b1410ec42af24a039e601812b77b77b9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b1410ec42af24a039e601812b77b77b9 2024-11-20T22:25:20,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b1410ec42af24a039e601812b77b77b9, entries=150, sequenceid=134, filesize=11.9 K 2024-11-20T22:25:20,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 2f10f015cf26343913efd4f9264f4075 in 974ms, sequenceid=134, compaction requested=true 2024-11-20T22:25:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:20,222 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:20,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:20,224 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:20,225 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:20,225 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:20,225 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor compaction (all files) 2024-11-20T22:25:20,225 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:20,225 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:20,225 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c277024ff13248b2a0f221939aeae81b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=99.4 K 2024-11-20T22:25:20,225 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:20,225 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c277024ff13248b2a0f221939aeae81b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214] 2024-11-20T22:25:20,226 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:20,226 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c277024ff13248b2a0f221939aeae81b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732141514516 2024-11-20T22:25:20,226 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/4880bd7b53fa43e2b616f0b766bf490b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a41b1a67d53146c199d6d3f351fae93d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/ee0c4892408047b8a34526b2c0935f18] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.5 K 2024-11-20T22:25:20,226 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4880bd7b53fa43e2b616f0b766bf490b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732141514516 2024-11-20T22:25:20,226 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 01940ff525ed40db85755e69c48cc29c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141516713 2024-11-20T22:25:20,226 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fb52741587f34b73b2b8808848571214, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732141518927 2024-11-20T22:25:20,227 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a41b1a67d53146c199d6d3f351fae93d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141516713 2024-11-20T22:25:20,239 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee0c4892408047b8a34526b2c0935f18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732141518935 2024-11-20T22:25:20,259 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:20,260 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#B#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:20,261 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/9675f95691b64604999fcf732a026200 is 50, key is test_row_0/B:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:20,261 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112091317879390f44d6a3ccb2ce4b7ded8c_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:20,262 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112091317879390f44d6a3ccb2ce4b7ded8c_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:20,262 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112091317879390f44d6a3ccb2ce4b7ded8c_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:20,280 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:20,282 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:20,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742234_1410 (size=12459) 2024-11-20T22:25:20,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742235_1411 (size=4469) 2024-11-20T22:25:20,305 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#343 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:20,305 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/84aefa4a8d2a477ba61cce859787f4cd is 175, key is test_row_0/A:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:20,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202295527450b14e7a825efdaff1db5cd0_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141519320/Put/seqid=0 2024-11-20T22:25:20,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742237_1413 (size=12304) 2024-11-20T22:25:20,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742236_1412 (size=31413) 2024-11-20T22:25:20,373 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/84aefa4a8d2a477ba61cce859787f4cd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/84aefa4a8d2a477ba61cce859787f4cd 2024-11-20T22:25:20,380 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into 84aefa4a8d2a477ba61cce859787f4cd(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:20,380 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:20,380 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=13, startTime=1732141520222; duration=0sec 2024-11-20T22:25:20,380 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:20,380 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:20,380 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:20,382 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:20,383 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:20,383 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:20,383 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fa2765e5d41147fc9af083695fb409e2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/101f7056daf549acb4cf9a5f4101fba0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b1410ec42af24a039e601812b77b77b9] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.5 K 2024-11-20T22:25:20,384 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fa2765e5d41147fc9af083695fb409e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732141514516 2024-11-20T22:25:20,384 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 101f7056daf549acb4cf9a5f4101fba0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732141516713 2024-11-20T22:25:20,385 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b1410ec42af24a039e601812b77b77b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732141518935 2024-11-20T22:25:20,401 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
2f10f015cf26343913efd4f9264f4075#C#compaction#345 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:20,402 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b8bba32c56a14bdcbd89fe686d9dcca9 is 50, key is test_row_0/C:col10/1732141519246/Put/seqid=0 2024-11-20T22:25:20,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742238_1414 (size=12459) 2024-11-20T22:25:20,440 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b8bba32c56a14bdcbd89fe686d9dcca9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b8bba32c56a14bdcbd89fe686d9dcca9 2024-11-20T22:25:20,447 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into b8bba32c56a14bdcbd89fe686d9dcca9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:20,447 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:20,447 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=13, startTime=1732141520222; duration=0sec 2024-11-20T22:25:20,447 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:20,447 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:20,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:20,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141580511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141580512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141580515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141580520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141580527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:20,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141580629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141580629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141580632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141580635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141580638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,703 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/9675f95691b64604999fcf732a026200 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/9675f95691b64604999fcf732a026200 2024-11-20T22:25:20,715 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into 9675f95691b64604999fcf732a026200(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:20,715 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:20,715 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=13, startTime=1732141520222; duration=0sec 2024-11-20T22:25:20,715 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:20,715 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:20,756 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202295527450b14e7a825efdaff1db5cd0_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202295527450b14e7a825efdaff1db5cd0_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:20,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/500131a98eec427e8997ef5a3cb19278, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:20,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/500131a98eec427e8997ef5a3cb19278 is 175, key is test_row_0/A:col10/1732141519320/Put/seqid=0 2024-11-20T22:25:20,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742239_1415 (size=31105) 2024-11-20T22:25:20,795 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/500131a98eec427e8997ef5a3cb19278 2024-11-20T22:25:20,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/1cf6f51712554db2b07cb5f0cf2cd275 is 50, key is test_row_0/B:col10/1732141519320/Put/seqid=0 2024-11-20T22:25:20,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141580841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141580841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141580841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141580842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:20,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141580852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:20,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742240_1416 (size=12151) 2024-11-20T22:25:21,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141581150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141581150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141581151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141581152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141581162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,271 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/1cf6f51712554db2b07cb5f0cf2cd275 2024-11-20T22:25:21,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/232aea56b9ad421bb1f993a605142024 is 50, key is test_row_0/C:col10/1732141519320/Put/seqid=0 2024-11-20T22:25:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742241_1417 (size=12151) 2024-11-20T22:25:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:21,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141581664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141581666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141581666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141581666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:21,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141581676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:21,734 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/232aea56b9ad421bb1f993a605142024 2024-11-20T22:25:21,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/500131a98eec427e8997ef5a3cb19278 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278 2024-11-20T22:25:21,743 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278, entries=150, sequenceid=157, filesize=30.4 K 2024-11-20T22:25:21,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/1cf6f51712554db2b07cb5f0cf2cd275 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/1cf6f51712554db2b07cb5f0cf2cd275 2024-11-20T22:25:21,748 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/1cf6f51712554db2b07cb5f0cf2cd275, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T22:25:21,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/232aea56b9ad421bb1f993a605142024 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/232aea56b9ad421bb1f993a605142024 2024-11-20T22:25:21,754 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/232aea56b9ad421bb1f993a605142024, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T22:25:21,755 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 2f10f015cf26343913efd4f9264f4075 in 1473ms, sequenceid=157, compaction requested=false 2024-11-20T22:25:21,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:21,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:21,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-20T22:25:21,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-20T22:25:21,762 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-20T22:25:21,762 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2610 sec 2024-11-20T22:25:21,764 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.2670 sec 2024-11-20T22:25:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:22,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:25:22,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:22,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:22,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:22,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:22,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:22,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:22,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f4d085cad3de4757804148ddc937133c_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:22,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141582749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141582753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742242_1418 (size=19774) 2024-11-20T22:25:22,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141582754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141582755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141582759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141582865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141582868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141582870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141582870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:22,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141582870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141583078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141583078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141583078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141583087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141583090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,168 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:23,172 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f4d085cad3de4757804148ddc937133c_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f4d085cad3de4757804148ddc937133c_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:23,173 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fc58fd30fe0b405abcf6ecb358f2d290, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:23,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fc58fd30fe0b405abcf6ecb358f2d290 is 175, key is test_row_0/A:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:23,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742243_1419 (size=57033) 2024-11-20T22:25:23,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141583392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141583392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141583395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141583399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141583401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T22:25:23,622 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-20T22:25:23,623 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=177, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fc58fd30fe0b405abcf6ecb358f2d290 2024-11-20T22:25:23,625 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:23,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-20T22:25:23,626 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:23,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:23,627 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:23,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:23,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/2a76dacc5334432c8192cb18c817705c is 50, key is test_row_0/B:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:23,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742244_1420 
(size=12151) 2024-11-20T22:25:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:23,780 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:23,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:23,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:23,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:23,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141583898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141583900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141583906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141583912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141583912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:23,937 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:23,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:23,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:23,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:23,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:23,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:24,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/2a76dacc5334432c8192cb18c817705c 2024-11-20T22:25:24,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:24,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/adc38507df7b495e84594a8597e9f086 is 50, key is test_row_0/C:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:24,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:24,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:24,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:24,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742245_1421 (size=12151) 2024-11-20T22:25:24,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/adc38507df7b495e84594a8597e9f086 2024-11-20T22:25:24,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/fc58fd30fe0b405abcf6ecb358f2d290 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290 2024-11-20T22:25:24,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290, entries=300, sequenceid=177, filesize=55.7 K 2024-11-20T22:25:24,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/2a76dacc5334432c8192cb18c817705c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/2a76dacc5334432c8192cb18c817705c 2024-11-20T22:25:24,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/2a76dacc5334432c8192cb18c817705c, entries=150, sequenceid=177, filesize=11.9 K 2024-11-20T22:25:24,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/adc38507df7b495e84594a8597e9f086 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/adc38507df7b495e84594a8597e9f086 2024-11-20T22:25:24,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/adc38507df7b495e84594a8597e9f086, entries=150, sequenceid=177, filesize=11.9 K 2024-11-20T22:25:24,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 2f10f015cf26343913efd4f9264f4075 in 1488ms, sequenceid=177, compaction requested=true 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T22:25:24,181 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:24,182 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:24,187 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:24,187 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:24,187 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:24,187 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:24,187 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:24,187 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:24,188 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b8bba32c56a14bdcbd89fe686d9dcca9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/232aea56b9ad421bb1f993a605142024, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/adc38507df7b495e84594a8597e9f086] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.9 K 2024-11-20T22:25:24,188 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/84aefa4a8d2a477ba61cce859787f4cd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=116.7 K 2024-11-20T22:25:24,188 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:24,188 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/84aefa4a8d2a477ba61cce859787f4cd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290] 2024-11-20T22:25:24,188 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b8bba32c56a14bdcbd89fe686d9dcca9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732141518935 2024-11-20T22:25:24,188 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84aefa4a8d2a477ba61cce859787f4cd, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732141518935 2024-11-20T22:25:24,189 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 232aea56b9ad421bb1f993a605142024, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141519320 2024-11-20T22:25:24,190 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting adc38507df7b495e84594a8597e9f086, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141522688 2024-11-20T22:25:24,190 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 500131a98eec427e8997ef5a3cb19278, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141519320 2024-11-20T22:25:24,190 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc58fd30fe0b405abcf6ecb358f2d290, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141520488 2024-11-20T22:25:24,213 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:24,223 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#C#compaction#352 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:24,223 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/9d51db45bf424689b88d6a2b595667eb is 50, key is test_row_0/C:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:24,231 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ae87a09bc3cd44658e785fa3b5d10903_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:24,234 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ae87a09bc3cd44658e785fa3b5d10903_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:24,234 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ae87a09bc3cd44658e785fa3b5d10903_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:24,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:24,249 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:24,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T22:25:24,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:24,250 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T22:25:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:24,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742247_1423 (size=4469) 2024-11-20T22:25:24,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742246_1422 (size=12561) 2024-11-20T22:25:24,281 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#351 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:24,282 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f1aa112565e04d2daac44555676050ef is 175, key is test_row_0/A:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:24,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112072dbcc014f0440de95aa1c71d9127b71_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141522750/Put/seqid=0 2024-11-20T22:25:24,296 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/9d51db45bf424689b88d6a2b595667eb as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/9d51db45bf424689b88d6a2b595667eb 2024-11-20T22:25:24,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742248_1424 (size=31515) 2024-11-20T22:25:24,310 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into 9d51db45bf424689b88d6a2b595667eb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:24,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:24,310 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=13, startTime=1732141524181; duration=0sec 2024-11-20T22:25:24,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:24,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:24,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:24,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:24,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor compaction (all files) 2024-11-20T22:25:24,312 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:24,312 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/9675f95691b64604999fcf732a026200, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/1cf6f51712554db2b07cb5f0cf2cd275, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/2a76dacc5334432c8192cb18c817705c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=35.9 K 2024-11-20T22:25:24,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9675f95691b64604999fcf732a026200, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732141518935 2024-11-20T22:25:24,313 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cf6f51712554db2b07cb5f0cf2cd275, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732141519320 2024-11-20T22:25:24,313 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a76dacc5334432c8192cb18c817705c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141522688 2024-11-20T22:25:24,325 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
2f10f015cf26343913efd4f9264f4075#B#compaction#354 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:24,326 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/61d85a3085fc458bb2d0b93de3bc8c3b is 50, key is test_row_0/B:col10/1732141522693/Put/seqid=0 2024-11-20T22:25:24,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742249_1425 (size=12304) 2024-11-20T22:25:24,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742250_1426 (size=12561) 2024-11-20T22:25:24,714 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f1aa112565e04d2daac44555676050ef as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f1aa112565e04d2daac44555676050ef 2024-11-20T22:25:24,723 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into f1aa112565e04d2daac44555676050ef(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:24,723 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:24,723 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=13, startTime=1732141524181; duration=0sec 2024-11-20T22:25:24,723 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:24,723 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:24,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:24,736 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112072dbcc014f0440de95aa1c71d9127b71_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112072dbcc014f0440de95aa1c71d9127b71_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:24,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/59194c5ac65241a6af2ca2c87e07c5b6, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:24,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/59194c5ac65241a6af2ca2c87e07c5b6 is 175, key is test_row_0/A:col10/1732141522750/Put/seqid=0 2024-11-20T22:25:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:24,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742251_1427 (size=31105) 2024-11-20T22:25:24,769 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/59194c5ac65241a6af2ca2c87e07c5b6 2024-11-20T22:25:24,780 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/61d85a3085fc458bb2d0b93de3bc8c3b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/61d85a3085fc458bb2d0b93de3bc8c3b 2024-11-20T22:25:24,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/6fe58802982042e08220365c721494a5 is 50, key is test_row_0/B:col10/1732141522750/Put/seqid=0 2024-11-20T22:25:24,785 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into 61d85a3085fc458bb2d0b93de3bc8c3b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:24,785 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:24,785 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=13, startTime=1732141524181; duration=0sec 2024-11-20T22:25:24,785 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:24,785 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:24,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742252_1428 (size=12151) 2024-11-20T22:25:24,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:24,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:24,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141584943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:24,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141584948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:24,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141584950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:24,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141584950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:24,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:24,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141584951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141585053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141585064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141585067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141585069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141585067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,232 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/6fe58802982042e08220365c721494a5 2024-11-20T22:25:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/146c2a5c46ba4dfeb93b792cdebf434a is 50, key is test_row_0/C:col10/1732141522750/Put/seqid=0 2024-11-20T22:25:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742253_1429 (size=12151) 2024-11-20T22:25:25,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141585260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141585270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141585275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141585282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141585282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141585566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141585577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141585579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141585591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141585592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,649 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/146c2a5c46ba4dfeb93b792cdebf434a 2024-11-20T22:25:25,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/59194c5ac65241a6af2ca2c87e07c5b6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6 2024-11-20T22:25:25,664 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6, entries=150, sequenceid=197, filesize=30.4 K 2024-11-20T22:25:25,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/6fe58802982042e08220365c721494a5 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6fe58802982042e08220365c721494a5 2024-11-20T22:25:25,685 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6fe58802982042e08220365c721494a5, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T22:25:25,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/146c2a5c46ba4dfeb93b792cdebf434a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/146c2a5c46ba4dfeb93b792cdebf434a 2024-11-20T22:25:25,691 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/146c2a5c46ba4dfeb93b792cdebf434a, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T22:25:25,692 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2f10f015cf26343913efd4f9264f4075 in 1442ms, sequenceid=197, compaction requested=false 2024-11-20T22:25:25,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:25,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:25,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-20T22:25:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-20T22:25:25,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T22:25:25,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0660 sec 2024-11-20T22:25:25,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.0700 sec 2024-11-20T22:25:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T22:25:25,739 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T22:25:25,740 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-20T22:25:25,741 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:25,742 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:25,742 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:25,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:25,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:25,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T22:25:25,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:25,896 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:25:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:25,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b7310864a2624a0c92d005a9b1b5bbc4_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141524950/Put/seqid=0 2024-11-20T22:25:25,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742254_1430 (size=12304) 2024-11-20T22:25:25,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:25,969 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b7310864a2624a0c92d005a9b1b5bbc4_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b7310864a2624a0c92d005a9b1b5bbc4_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:25,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/a484817be80c461ea4b55a7fbe97c640, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:25,970 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/a484817be80c461ea4b55a7fbe97c640 is 175, key is test_row_0/A:col10/1732141524950/Put/seqid=0 2024-11-20T22:25:26,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742255_1431 (size=31105) 2024-11-20T22:25:26,026 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/a484817be80c461ea4b55a7fbe97c640 2024-11-20T22:25:26,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:26,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/0d9eb3c785c443ae8701415e5f559017 is 50, key is test_row_0/B:col10/1732141524950/Put/seqid=0 2024-11-20T22:25:26,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742256_1432 (size=12151) 2024-11-20T22:25:26,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:26,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141586131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141586133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141586134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141586139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141586143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141586248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141586248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141586248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141586249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141586254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:26,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141586454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141586458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141586456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141586459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,470 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/0d9eb3c785c443ae8701415e5f559017 2024-11-20T22:25:26,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141586467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/34d90e356ac8482ebed5484323ba7046 is 50, key is test_row_0/C:col10/1732141524950/Put/seqid=0 2024-11-20T22:25:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742257_1433 (size=12151) 2024-11-20T22:25:26,521 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/34d90e356ac8482ebed5484323ba7046 2024-11-20T22:25:26,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/a484817be80c461ea4b55a7fbe97c640 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640 2024-11-20T22:25:26,543 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640, entries=150, sequenceid=216, filesize=30.4 K 2024-11-20T22:25:26,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/0d9eb3c785c443ae8701415e5f559017 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/0d9eb3c785c443ae8701415e5f559017 2024-11-20T22:25:26,551 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 
{event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/0d9eb3c785c443ae8701415e5f559017, entries=150, sequenceid=216, filesize=11.9 K 2024-11-20T22:25:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/34d90e356ac8482ebed5484323ba7046 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/34d90e356ac8482ebed5484323ba7046 2024-11-20T22:25:26,558 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/34d90e356ac8482ebed5484323ba7046, entries=150, sequenceid=216, filesize=11.9 K 2024-11-20T22:25:26,559 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 2f10f015cf26343913efd4f9264f4075 in 663ms, sequenceid=216, compaction requested=true 2024-11-20T22:25:26,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:26,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
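The repeated "Over memstore limit=512.0 K" rejections above reach the writer as org.apache.hadoop.hbase.RegionTooBusyException. A minimal client-side sketch of one of these writes, with an explicit retry on that exception, might look like the code below; the row, qualifier, and value mirror the test_row_0/A:col10 keys seen in the log, but the value and backoff policy are purely illustrative, and in practice the stock HBase client normally retries this exception internally before surfacing it to application code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Illustrative cell; the test writes rows like test_row_0 into families A/B/C.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);                    // rejected while the region is over its blocking limit
              break;
            } catch (RegionTooBusyException e) { // retriable: the region drains once the flush completes
              if (++attempts >= 5) throw e;      // give up after a few tries
              Thread.sleep(200L * attempts);     // simple linear backoff, illustrative only
            }
          }
        }
      }
    }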
2024-11-20T22:25:26,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-20T22:25:26,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-20T22:25:26,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-20T22:25:26,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 820 msec 2024-11-20T22:25:26,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 823 msec 2024-11-20T22:25:26,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:26,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:26,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:26,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:26,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:26,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:26,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:26,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:26,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207e108d7e4bde42f8a2d2f2dd86093238_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:26,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141586817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141586818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141586822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141586824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141586824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T22:25:26,846 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-20T22:25:26,847 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:26,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-20T22:25:26,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:26,848 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:26,849 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:26,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:26,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is 
added to blk_1073742258_1434 (size=12304) 2024-11-20T22:25:26,853 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:26,859 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207e108d7e4bde42f8a2d2f2dd86093238_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207e108d7e4bde42f8a2d2f2dd86093238_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:26,862 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/40c33c47c6a1419eabc4145442c06a32, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:26,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/40c33c47c6a1419eabc4145442c06a32 is 175, key is test_row_0/A:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:26,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742259_1435 (size=31105) 2024-11-20T22:25:26,924 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/40c33c47c6a1419eabc4145442c06a32 2024-11-20T22:25:26,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141586926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141586930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141586931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:26,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141586938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:26,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141586939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:26,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/dcbfc89ac2ea44dab0745e77587eb48d is 50, key is test_row_0/B:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:26,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742260_1436 (size=12151) 2024-11-20T22:25:26,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/dcbfc89ac2ea44dab0745e77587eb48d 2024-11-20T22:25:27,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/d130131fa62b4dfaaf7add0429f0e793 is 50, key is test_row_0/C:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:27,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
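The flush that completes above as "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed" (reported by HBaseAdmin$TableFuture), and the new FlushTableProcedure stored as pid=110 immediately after it, are both driven by a client-side Admin flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of that call, with connection details assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The master turns this request into a FlushTableProcedure, which spawns one
          // FlushRegionProcedure per region of the table (pid=110 -> pid=111 in this log).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }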
2024-11-20T22:25:27,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742261_1437 (size=12151) 2024-11-20T22:25:27,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/d130131fa62b4dfaaf7add0429f0e793 2024-11-20T22:25:27,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/40c33c47c6a1419eabc4145442c06a32 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32 2024-11-20T22:25:27,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32, entries=150, sequenceid=240, filesize=30.4 K 2024-11-20T22:25:27,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/dcbfc89ac2ea44dab0745e77587eb48d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/dcbfc89ac2ea44dab0745e77587eb48d 2024-11-20T22:25:27,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/dcbfc89ac2ea44dab0745e77587eb48d, entries=150, sequenceid=240, filesize=11.9 K 2024-11-20T22:25:27,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/d130131fa62b4dfaaf7add0429f0e793 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/d130131fa62b4dfaaf7add0429f0e793 2024-11-20T22:25:27,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/d130131fa62b4dfaaf7add0429f0e793, entries=150, sequenceid=240, filesize=11.9 K 2024-11-20T22:25:27,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 2f10f015cf26343913efd4f9264f4075 in 313ms, sequenceid=240, compaction requested=true 2024-11-20T22:25:27,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:27,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:27,097 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:27,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,097 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:27,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:27,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:27,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:27,099 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:27,099 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor 
compaction (all files) 2024-11-20T22:25:27,099 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,100 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/61d85a3085fc458bb2d0b93de3bc8c3b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6fe58802982042e08220365c721494a5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/0d9eb3c785c443ae8701415e5f559017, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/dcbfc89ac2ea44dab0745e77587eb48d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=47.9 K 2024-11-20T22:25:27,100 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124830 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:27,100 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:27,100 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,100 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f1aa112565e04d2daac44555676050ef, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=121.9 K 2024-11-20T22:25:27,100 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
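The compactions above are system-requested: each flush pushes stores A, B, and C to 4 eligible files, CompactSplit queues them ("Because: MemStoreFlusher.0"), and ExploringCompactionPolicy selects all 4 for a minor compaction. For comparison, a client can also ask for a compaction through the Admin API; a minimal sketch, assuming the same table and the B family shown above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask the region servers to compact only the B store files of this table...
          admin.compact(table, Bytes.toBytes("B"));
          // ...or request a major compaction of every store in every region.
          admin.majorCompact(table);
        }
      }
    }

Both calls are asynchronous requests; the actual work is queued and throttled on the region server, as the PressureAwareThroughputController lines in this log show.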
2024-11-20T22:25:27,100 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f1aa112565e04d2daac44555676050ef, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32] 2024-11-20T22:25:27,100 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1aa112565e04d2daac44555676050ef, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141522688 2024-11-20T22:25:27,100 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 61d85a3085fc458bb2d0b93de3bc8c3b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141522688 2024-11-20T22:25:27,101 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe58802982042e08220365c721494a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732141522746 2024-11-20T22:25:27,101 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59194c5ac65241a6af2ca2c87e07c5b6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732141522746 2024-11-20T22:25:27,101 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d9eb3c785c443ae8701415e5f559017, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732141524945 2024-11-20T22:25:27,101 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a484817be80c461ea4b55a7fbe97c640, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732141524945 2024-11-20T22:25:27,102 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40c33c47c6a1419eabc4145442c06a32, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732141526132 2024-11-20T22:25:27,102 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting dcbfc89ac2ea44dab0745e77587eb48d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732141526132 2024-11-20T22:25:27,121 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#B#compaction#363 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:27,121 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/7e51dbe8b91b4fb39bfc3bd5568d82ba is 50, key is test_row_0/B:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:27,123 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:27,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:27,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:25:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:27,148 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208d4657fc060d41df889feacd10ed275c_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:27,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:27,151 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208d4657fc060d41df889feacd10ed275c_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:27,151 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208d4657fc060d41df889feacd10ed275c_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:27,155 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:27,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
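Every "Over memstore limit=512.0 K" rejection in this run comes from HRegion.checkResources (visible at the top of each stack trace), which blocks writes once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A 512 K blocking limit is far below the production defaults (128 MB flush size, multiplier 4), so the test presumably shrinks the flush size to force this flush-and-block churn. The sketch below shows one hypothetical combination that would reproduce the same limit; the values actually used by the test are not shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyMemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: 128 K flush threshold * 4 block multiplier = 512 K blocking limit,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long limitKb = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4) / 1024;
        System.out.println("blocking limit = " + limitKb + " K");
      }
    }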
2024-11-20T22:25:27,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742262_1438 (size=12697) 2024-11-20T22:25:27,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
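The mobdir/.tmp writes and the HMobStore "FLUSH Renaming flushed file" steps recorded earlier (and again just below) occur only for a column family with MOB storage enabled; family A appears to be MOB-enabled here, while B and C flush through the plain DefaultStoreFlusher. A sketch of how such a table might be declared, with a deliberately low, hypothetical MOB threshold so that the small test cells qualify as MOB values (the test's real descriptor is not shown in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)   // values above the threshold are written to MOB files under mobdir/
                  .setMobThreshold(4)    // hypothetical: low enough that the small test cells become MOB cells
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build());
        }
      }
    }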
2024-11-20T22:25:27,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112017b87ab77317403b95a7c1d5485f79f8_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141526818/Put/seqid=0 2024-11-20T22:25:27,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742263_1439 (size=4469) 2024-11-20T22:25:27,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742264_1440 (size=14794) 2024-11-20T22:25:27,206 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:27,210 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112017b87ab77317403b95a7c1d5485f79f8_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112017b87ab77317403b95a7c1d5485f79f8_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:27,211 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c4e866077f8e4b17b6b84e5bc8d6ea89, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:27,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c4e866077f8e4b17b6b84e5bc8d6ea89 is 175, key is test_row_0/A:col10/1732141526818/Put/seqid=0 2024-11-20T22:25:27,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141587202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141587206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141587208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141587209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141587223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742265_1441 (size=39749) 2024-11-20T22:25:27,314 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:27,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141587319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141587320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141587325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141587325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141587331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:27,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:27,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141587529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141587534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141587535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141587535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141587542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,572 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/7e51dbe8b91b4fb39bfc3bd5568d82ba as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/7e51dbe8b91b4fb39bfc3bd5568d82ba 2024-11-20T22:25:27,579 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into 7e51dbe8b91b4fb39bfc3bd5568d82ba(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:27,579 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:27,579 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=12, startTime=1732141527097; duration=0sec 2024-11-20T22:25:27,579 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:27,579 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:27,579 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:27,593 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#364 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:27,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:27,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:27,593 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:27,594 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/9d51db45bf424689b88d6a2b595667eb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/146c2a5c46ba4dfeb93b792cdebf434a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/34d90e356ac8482ebed5484323ba7046, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/d130131fa62b4dfaaf7add0429f0e793] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=47.9 K 2024-11-20T22:25:27,594 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/20ea8a94ce9b4411bdb6887308fdeb25 is 175, key is test_row_0/A:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:27,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d51db45bf424689b88d6a2b595667eb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732141522688 2024-11-20T22:25:27,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 146c2a5c46ba4dfeb93b792cdebf434a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732141522746 2024-11-20T22:25:27,596 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 34d90e356ac8482ebed5484323ba7046, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732141524945 2024-11-20T22:25:27,596 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d130131fa62b4dfaaf7add0429f0e793, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732141526132 2024-11-20T22:25:27,622 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
as already flushing 2024-11-20T22:25:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742266_1442 (size=31651) 2024-11-20T22:25:27,634 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#C#compaction#366 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:27,634 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/f13e5621fa124cc78e286d8c0139e4c9 is 50, key is test_row_0/C:col10/1732141526132/Put/seqid=0 2024-11-20T22:25:27,638 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c4e866077f8e4b17b6b84e5bc8d6ea89 2024-11-20T22:25:27,648 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/20ea8a94ce9b4411bdb6887308fdeb25 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/20ea8a94ce9b4411bdb6887308fdeb25 2024-11-20T22:25:27,655 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into 20ea8a94ce9b4411bdb6887308fdeb25(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:27,655 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:27,655 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=12, startTime=1732141527096; duration=0sec 2024-11-20T22:25:27,655 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,655 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:27,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/e8bed5b379da4419a780bd8ba2cfe147 is 50, key is test_row_0/B:col10/1732141526818/Put/seqid=0 2024-11-20T22:25:27,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742267_1443 (size=12697) 2024-11-20T22:25:27,687 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/f13e5621fa124cc78e286d8c0139e4c9 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f13e5621fa124cc78e286d8c0139e4c9 2024-11-20T22:25:27,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742268_1444 (size=12151) 2024-11-20T22:25:27,697 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into f13e5621fa124cc78e286d8c0139e4c9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:27,697 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:27,697 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=12, startTime=1732141527097; duration=0sec 2024-11-20T22:25:27,698 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:27,698 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:27,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:27,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141587839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141587841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141587844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141587844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141587853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,928 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:27,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:27,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:28,081 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:28,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:28,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/e8bed5b379da4419a780bd8ba2cfe147 2024-11-20T22:25:28,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/01b2517f9218408cae287efdfd935e5a is 50, key is test_row_0/C:col10/1732141526818/Put/seqid=0 2024-11-20T22:25:28,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742269_1445 (size=12151) 2024-11-20T22:25:28,235 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:28,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
as already flushing 2024-11-20T22:25:28,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141588356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141588356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141588360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141588363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:28,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141588369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:28,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:28,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:28,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T22:25:28,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing 2024-11-20T22:25:28,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:28,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:28,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
        at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
        at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
        at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''}
        at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
        at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
        at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
        at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
        at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
        at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:25:28,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/01b2517f9218408cae287efdfd935e5a
2024-11-20T22:25:28,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c4e866077f8e4b17b6b84e5bc8d6ea89 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89
2024-11-20T22:25:28,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89, entries=200, sequenceid=254, filesize=38.8 K
2024-11-20T22:25:28,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/e8bed5b379da4419a780bd8ba2cfe147 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e8bed5b379da4419a780bd8ba2cfe147
2024-11-20T22:25:28,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e8bed5b379da4419a780bd8ba2cfe147, entries=150, sequenceid=254, filesize=11.9 K
2024-11-20T22:25:28,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/01b2517f9218408cae287efdfd935e5a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/01b2517f9218408cae287efdfd935e5a
2024-11-20T22:25:28,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/01b2517f9218408cae287efdfd935e5a, entries=150, sequenceid=254, filesize=11.9 K
2024-11-20T22:25:28,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2f10f015cf26343913efd4f9264f4075 in 1461ms, sequenceid=254, compaction requested=false
2024-11-20T22:25:28,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075:
2024-11-20T22:25:28,702 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048
2024-11-20T22:25:28,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.
2024-11-20T22:25:28,703 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C
2024-11-20T22:25:28,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:28,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200602ebe1ee2a41d988dbdf8e5bc11577_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141527199/Put/seqid=0
2024-11-20T22:25:28,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742270_1446 (size=12454)
2024-11-20T22:25:28,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:25:28,775 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200602ebe1ee2a41d988dbdf8e5bc11577_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200602ebe1ee2a41d988dbdf8e5bc11577_2f10f015cf26343913efd4f9264f4075
2024-11-20T22:25:28,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c5640c1883e149518f3e57e29bf881a0, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075]
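Editor's note: the HMobStore(268) rename and DefaultMobStoreFlusher entries above indicate that column family A of this table is MOB-enabled, which is why flushed values first land under mobdir/.tmp and are then renamed into mobdir/data. Purely as a hedged illustration (only the table and family names are taken from this log; the MOB threshold, connection settings, and everything else below are assumptions, not the test's actual setup), a MOB-enabled family can be declared through the public client API roughly like this:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family "A" stores cells larger than the threshold as MOB files under /mobdir.
      // The 100-byte threshold is a placeholder; the value used by TestAcidGuarantees is not visible in this log.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L)
              .build())
          .build());
    }
  }
}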
2024-11-20T22:25:28,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c5640c1883e149518f3e57e29bf881a0 is 175, key is test_row_0/A:col10/1732141527199/Put/seqid=0
2024-11-20T22:25:28,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742271_1447 (size=31255)
2024-11-20T22:25:28,831 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=279, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c5640c1883e149518f3e57e29bf881a0
2024-11-20T22:25:28,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a08af0c261ff49b293a7dce8483498f8 is 50, key is test_row_0/B:col10/1732141527199/Put/seqid=0
2024-11-20T22:25:28,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742272_1448 (size=12301)
2024-11-20T22:25:28,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-20T22:25:29,319 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a08af0c261ff49b293a7dce8483498f8
2024-11-20T22:25:29,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/4ed4874e43054e58a1c90374153e2812 is 50, key is test_row_0/C:col10/1732141527199/Put/seqid=0
2024-11-20T22:25:29,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742273_1449 (size=12301)
2024-11-20T22:25:29,363 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/4ed4874e43054e58a1c90374153e2812
2024-11-20T22:25:29,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. as already flushing
2024-11-20T22:25:29,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075
2024-11-20T22:25:29,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c5640c1883e149518f3e57e29bf881a0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0
2024-11-20T22:25:29,387 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0, entries=150, sequenceid=279, filesize=30.5 K
2024-11-20T22:25:29,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/a08af0c261ff49b293a7dce8483498f8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a08af0c261ff49b293a7dce8483498f8
[From 2024-11-20T22:25:29,395 through 22:25:29,527 the RpcServer.default.FPBQ.Fifo handler threads (handler=0/1/2, port=46811) emit the same DEBUG record -- storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker -- well over a hundred times; those duplicate entries are omitted below.]
2024-11-20T22:25:29,395 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a08af0c261ff49b293a7dce8483498f8, entries=150, sequenceid=279, filesize=12.0 K
2024-11-20T22:25:29,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/4ed4874e43054e58a1c90374153e2812 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/4ed4874e43054e58a1c90374153e2812
2024-11-20T22:25:29,400 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/4ed4874e43054e58a1c90374153e2812, entries=150, sequenceid=279, filesize=12.0 K
2024-11-20T22:25:29,401 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=40.25 KB/41220 for 2f10f015cf26343913efd4f9264f4075 in 697ms, sequenceid=279, compaction requested=true
2024-11-20T22:25:29,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075:
2024-11-20T22:25:29,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.
2024-11-20T22:25:29,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111
2024-11-20T22:25:29,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=111
2024-11-20T22:25:29,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110
2024-11-20T22:25:29,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5530 sec
2024-11-20T22:25:29,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.5570 sec
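Editor's note: pid=110 (FlushTableProcedure) and its subprocedure pid=111 (FlushRegionProcedure) are the server-side bookkeeping for a table flush, and the master's repeated "Checking to see if procedure is done pid=110" entries are the caller polling for completion. As a minimal, hedged sketch of what triggers this path from a client (only the table name comes from the log; connection details are whatever the local hbase-site.xml provides, and the test itself may drive the flush differently):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table and waits for the
      // flush procedure to be reported as done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}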
2024-11-20T22:25:29,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T22:25:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A
2024-11-20T22:25:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B
2024-11-20T22:25:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C
2024-11-20T22:25:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:29,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075
2024-11-20T22:25:29,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d16b868aec564e1eaf44fd4737c41127_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141529387/Put/seqid=0
2024-11-20T22:25:29,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742274_1450 (size=20074)
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141589537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141589539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141589540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141589543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141589544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141589650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141589650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141589651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141589655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141589656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141589859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141589860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141589861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141589863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:29,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141589864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:29,902 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:29,910 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d16b868aec564e1eaf44fd4737c41127_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d16b868aec564e1eaf44fd4737c41127_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:29,911 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/04b389c8358243608b028727ac1937a3, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:29,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/04b389c8358243608b028727ac1937a3 is 175, key is test_row_0/A:col10/1732141529387/Put/seqid=0 2024-11-20T22:25:29,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742275_1451 (size=57329) 2024-11-20T22:25:29,951 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, 
memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/04b389c8358243608b028727ac1937a3 2024-11-20T22:25:29,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/93ccf3b64e944ef9a277a6f33d30a80f is 50, key is test_row_0/B:col10/1732141529387/Put/seqid=0 2024-11-20T22:25:30,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742276_1452 (size=9857) 2024-11-20T22:25:30,015 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/93ccf3b64e944ef9a277a6f33d30a80f 2024-11-20T22:25:30,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/0c415a04481544cea5e5ec3e41a579ad is 50, key is test_row_0/C:col10/1732141529387/Put/seqid=0 2024-11-20T22:25:30,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742277_1453 (size=9857) 2024-11-20T22:25:30,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/0c415a04481544cea5e5ec3e41a579ad 2024-11-20T22:25:30,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/04b389c8358243608b028727ac1937a3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3 2024-11-20T22:25:30,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3, entries=300, sequenceid=290, filesize=56.0 K 2024-11-20T22:25:30,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/93ccf3b64e944ef9a277a6f33d30a80f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/93ccf3b64e944ef9a277a6f33d30a80f 2024-11-20T22:25:30,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/93ccf3b64e944ef9a277a6f33d30a80f, entries=100, sequenceid=290, filesize=9.6 K 2024-11-20T22:25:30,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141590173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141590173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/0c415a04481544cea5e5ec3e41a579ad as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/0c415a04481544cea5e5ec3e41a579ad 2024-11-20T22:25:30,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/0c415a04481544cea5e5ec3e41a579ad, entries=100, sequenceid=290, filesize=9.6 K 2024-11-20T22:25:30,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 2f10f015cf26343913efd4f9264f4075 in 736ms, sequenceid=290, compaction requested=true 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:30,187 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:30,187 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:30,190 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47006 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:30,190 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor compaction (all files) 2024-11-20T22:25:30,190 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:30,190 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/7e51dbe8b91b4fb39bfc3bd5568d82ba, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e8bed5b379da4419a780bd8ba2cfe147, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a08af0c261ff49b293a7dce8483498f8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/93ccf3b64e944ef9a277a6f33d30a80f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=45.9 K 2024-11-20T22:25:30,190 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159984 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:30,190 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:30,190 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:30,190 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/20ea8a94ce9b4411bdb6887308fdeb25, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=156.2 K 2024-11-20T22:25:30,190 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:30,190 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/20ea8a94ce9b4411bdb6887308fdeb25, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3] 2024-11-20T22:25:30,191 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e51dbe8b91b4fb39bfc3bd5568d82ba, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732141526132 2024-11-20T22:25:30,191 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20ea8a94ce9b4411bdb6887308fdeb25, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732141526132 2024-11-20T22:25:30,191 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e8bed5b379da4419a780bd8ba2cfe147, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732141526816 2024-11-20T22:25:30,191 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4e866077f8e4b17b6b84e5bc8d6ea89, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732141526816 2024-11-20T22:25:30,191 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting a08af0c261ff49b293a7dce8483498f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732141527199 2024-11-20T22:25:30,192 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 93ccf3b64e944ef9a277a6f33d30a80f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141529387 2024-11-20T22:25:30,192 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5640c1883e149518f3e57e29bf881a0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732141527199 2024-11-20T22:25:30,192 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04b389c8358243608b028727ac1937a3, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141529382 2024-11-20T22:25:30,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T22:25:30,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:30,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:30,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:30,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:30,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,217 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:30,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141590213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141590215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,225 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#B#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:30,225 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/6620e76e21a049e3befd474ab3d77bfa is 50, key is test_row_0/B:col10/1732141529387/Put/seqid=0 2024-11-20T22:25:30,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141590224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cec06a3d192a4c86946f8c19caa682c9_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141530192/Put/seqid=0 2024-11-20T22:25:30,260 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120539a7835e16c4a8fa2052add3c3845a3_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:30,264 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120539a7835e16c4a8fa2052add3c3845a3_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:30,264 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120539a7835e16c4a8fa2052add3c3845a3_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:30,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742278_1454 (size=12983) 2024-11-20T22:25:30,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/6620e76e21a049e3befd474ab3d77bfa as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6620e76e21a049e3befd474ab3d77bfa 2024-11-20T22:25:30,318 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into 6620e76e21a049e3befd474ab3d77bfa(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:30,318 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:30,319 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=12, startTime=1732141530187; duration=0sec 2024-11-20T22:25:30,319 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:30,319 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:30,319 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:30,320 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47006 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:30,320 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:30,320 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:30,320 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f13e5621fa124cc78e286d8c0139e4c9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/01b2517f9218408cae287efdfd935e5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/4ed4874e43054e58a1c90374153e2812, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/0c415a04481544cea5e5ec3e41a579ad] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=45.9 K 2024-11-20T22:25:30,322 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f13e5621fa124cc78e286d8c0139e4c9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732141526132 2024-11-20T22:25:30,323 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 01b2517f9218408cae287efdfd935e5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732141526816 2024-11-20T22:25:30,326 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ed4874e43054e58a1c90374153e2812, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732141527199 2024-11-20T22:25:30,328 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c415a04481544cea5e5ec3e41a579ad, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141529387 2024-11-20T22:25:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742279_1455 (size=17534) 2024-11-20T22:25:30,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742280_1456 (size=4469) 2024-11-20T22:25:30,356 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:30,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141590329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141590329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141590335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,365 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#C#compaction#378 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:30,365 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/575347b5099a4590b69b9465ff98c8b8 is 50, key is test_row_0/C:col10/1732141529387/Put/seqid=0 2024-11-20T22:25:30,368 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cec06a3d192a4c86946f8c19caa682c9_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cec06a3d192a4c86946f8c19caa682c9_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:30,370 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b3345d5d5b6b4d53a47ec5623a36e461, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:30,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b3345d5d5b6b4d53a47ec5623a36e461 is 175, key is test_row_0/A:col10/1732141530192/Put/seqid=0 2024-11-20T22:25:30,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742281_1457 (size=48639) 
2024-11-20T22:25:30,420 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b3345d5d5b6b4d53a47ec5623a36e461 2024-11-20T22:25:30,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/c44682b5f1b9492c874ebfdad5ec1094 is 50, key is test_row_0/B:col10/1732141530192/Put/seqid=0 2024-11-20T22:25:30,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742282_1458 (size=12983) 2024-11-20T22:25:30,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742283_1459 (size=12301) 2024-11-20T22:25:30,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/c44682b5f1b9492c874ebfdad5ec1094 2024-11-20T22:25:30,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/f7066c581b2e4550b1e0eb7cf2ec7ec3 is 50, key is test_row_0/C:col10/1732141530192/Put/seqid=0 2024-11-20T22:25:30,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742284_1460 (size=12301) 2024-11-20T22:25:30,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/f7066c581b2e4550b1e0eb7cf2ec7ec3 2024-11-20T22:25:30,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b3345d5d5b6b4d53a47ec5623a36e461 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461 2024-11-20T22:25:30,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461, entries=250, sequenceid=318, filesize=47.5 K 2024-11-20T22:25:30,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/c44682b5f1b9492c874ebfdad5ec1094 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c44682b5f1b9492c874ebfdad5ec1094 2024-11-20T22:25:30,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c44682b5f1b9492c874ebfdad5ec1094, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T22:25:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/f7066c581b2e4550b1e0eb7cf2ec7ec3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f7066c581b2e4550b1e0eb7cf2ec7ec3 2024-11-20T22:25:30,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141590559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f7066c581b2e4550b1e0eb7cf2ec7ec3, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T22:25:30,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141590559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 2f10f015cf26343913efd4f9264f4075 in 386ms, sequenceid=318, compaction requested=false 2024-11-20T22:25:30,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:30,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:30,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:30,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:30,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:30,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:30,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209f274dde2fb9424587f1ae38d29057d7_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:30,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742285_1461 (size=14994) 2024-11-20T22:25:30,759 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#375 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:30,760 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f40bf80478dc472a820ac37ad8cf8c57 is 175, key is test_row_0/A:col10/1732141529387/Put/seqid=0 2024-11-20T22:25:30,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742286_1462 (size=32044) 2024-11-20T22:25:30,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141590775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141590773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141590779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,852 DEBUG [Thread-1681 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a7ac296 to 127.0.0.1:51916 2024-11-20T22:25:30,852 DEBUG [Thread-1681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:30,852 DEBUG [Thread-1683 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7664cff8 to 127.0.0.1:51916 2024-11-20T22:25:30,852 DEBUG [Thread-1683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:30,855 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/575347b5099a4590b69b9465ff98c8b8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/575347b5099a4590b69b9465ff98c8b8 2024-11-20T22:25:30,865 DEBUG [Thread-1689 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39448763 to 127.0.0.1:51916 2024-11-20T22:25:30,865 DEBUG [Thread-1689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:30,867 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into 575347b5099a4590b69b9465ff98c8b8(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:30,867 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:30,867 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=12, startTime=1732141530187; duration=0sec 2024-11-20T22:25:30,867 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:30,867 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:30,867 DEBUG [Thread-1687 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67bf566d to 127.0.0.1:51916 2024-11-20T22:25:30,868 DEBUG [Thread-1687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:30,868 DEBUG [Thread-1685 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x375e0d48 to 127.0.0.1:51916 2024-11-20T22:25:30,868 DEBUG [Thread-1685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:30,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141590882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141590880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141590892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:30,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141590892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141590892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:30,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T22:25:30,958 INFO [Thread-1680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T22:25:31,091 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:31,102 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209f274dde2fb9424587f1ae38d29057d7_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209f274dde2fb9424587f1ae38d29057d7_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:31,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141591102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141591102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,103 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/bd7a4f31cb2744de9b3ba224e8b637b7, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:31,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141591103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/bd7a4f31cb2744de9b3ba224e8b637b7 is 175, key is test_row_0/A:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:31,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742287_1463 (size=39949) 2024-11-20T22:25:31,129 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/bd7a4f31cb2744de9b3ba224e8b637b7 2024-11-20T22:25:31,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/231ebdbdfa1a47059af9567abce61c08 is 50, key is test_row_0/B:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:31,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742288_1464 (size=12301) 2024-11-20T22:25:31,207 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/f40bf80478dc472a820ac37ad8cf8c57 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f40bf80478dc472a820ac37ad8cf8c57 2024-11-20T22:25:31,217 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into f40bf80478dc472a820ac37ad8cf8c57(size=31.3 K), total size for store is 78.8 K. This selection was in queue for 0sec, and took 1sec to execute. 2024-11-20T22:25:31,217 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:31,217 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=12, startTime=1732141530187; duration=1sec 2024-11-20T22:25:31,217 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,217 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:31,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54430 deadline: 1732141591388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54500 deadline: 1732141591395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54504 deadline: 1732141591406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54426 deadline: 1732141591410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:31,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54442 deadline: 1732141591414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:31,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/231ebdbdfa1a47059af9567abce61c08 2024-11-20T22:25:31,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/2d6425b9d17044dc867d7865e3e36b3f is 50, key is test_row_0/C:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:31,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742289_1465 (size=12301) 2024-11-20T22:25:31,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/2d6425b9d17044dc867d7865e3e36b3f 2024-11-20T22:25:31,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/bd7a4f31cb2744de9b3ba224e8b637b7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7 2024-11-20T22:25:31,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7, entries=200, sequenceid=330, filesize=39.0 K 2024-11-20T22:25:31,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/231ebdbdfa1a47059af9567abce61c08 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/231ebdbdfa1a47059af9567abce61c08 2024-11-20T22:25:31,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/231ebdbdfa1a47059af9567abce61c08, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:25:31,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/2d6425b9d17044dc867d7865e3e36b3f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/2d6425b9d17044dc867d7865e3e36b3f 2024-11-20T22:25:31,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/2d6425b9d17044dc867d7865e3e36b3f, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T22:25:31,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2f10f015cf26343913efd4f9264f4075 in 1061ms, sequenceid=330, compaction requested=true 2024-11-20T22:25:31,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:31,665 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:31,666 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120632 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:31,666 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/A is initiating minor compaction (all files) 2024-11-20T22:25:31,666 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/A in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
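The repeated RegionTooBusyException stack traces above show HRegion.checkResources() rejecting mutations while the region's memstore sits over its 512.0 K blocking limit; the server deliberately pushes back until MemStoreFlusher.0 drains the region. The stock HBase client already retries this exception on its own, so purely as an illustration of the same back-off idea, a minimal standalone sketch might look like the following (the attempt count and sleep values are assumptions, not taken from this test; the table, row, and column names are the ones appearing in the log):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.RegionTooBusyException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        long backoffMs = 100L;                // assumed starting back-off, not from the log
        for (int attempt = 1; attempt <= 5; attempt++) {
          try {
            table.put(put);                   // fails with RegionTooBusyException while the memstore is over its blocking limit
            break;
          } catch (RegionTooBusyException e) {
            Thread.sleep(backoffMs);          // give MemStoreFlusher time to drain the region
            backoffMs *= 2;                   // simple exponential back-off
          }
        }
      }
    }
  }
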
2024-11-20T22:25:31,666 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f40bf80478dc472a820ac37ad8cf8c57, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=117.8 K 2024-11-20T22:25:31,666 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:31,666 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f40bf80478dc472a820ac37ad8cf8c57, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7] 2024-11-20T22:25:31,667 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f40bf80478dc472a820ac37ad8cf8c57, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141527207 2024-11-20T22:25:31,667 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3345d5d5b6b4d53a47ec5623a36e461, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732141529516 2024-11-20T22:25:31,667 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd7a4f31cb2744de9b3ba224e8b637b7, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141530212 2024-11-20T22:25:31,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:31,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,675 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:31,676 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:31,676 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/B is initiating minor compaction (all files) 2024-11-20T22:25:31,676 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/B in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:31,677 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6620e76e21a049e3befd474ab3d77bfa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c44682b5f1b9492c874ebfdad5ec1094, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/231ebdbdfa1a47059af9567abce61c08] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=36.7 K 2024-11-20T22:25:31,677 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6620e76e21a049e3befd474ab3d77bfa, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141527207 2024-11-20T22:25:31,677 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c44682b5f1b9492c874ebfdad5ec1094, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732141529539 2024-11-20T22:25:31,677 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 231ebdbdfa1a47059af9567abce61c08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141530212 2024-11-20T22:25:31,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:31,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2f10f015cf26343913efd4f9264f4075:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:31,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:31,683 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:31,696 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e20241120f09bbf04b21d4acdb9d15c06f919b05d_2f10f015cf26343913efd4f9264f4075 store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:31,701 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f09bbf04b21d4acdb9d15c06f919b05d_2f10f015cf26343913efd4f9264f4075, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:31,701 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f09bbf04b21d4acdb9d15c06f919b05d_2f10f015cf26343913efd4f9264f4075 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:31,703 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#B#compaction#385 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:31,713 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/e63fef19226345ad8ccd2aa532ca0d27 is 50, key is test_row_0/B:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:31,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742291_1467 (size=13085) 2024-11-20T22:25:31,744 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/e63fef19226345ad8ccd2aa532ca0d27 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e63fef19226345ad8ccd2aa532ca0d27 2024-11-20T22:25:31,751 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/B of 2f10f015cf26343913efd4f9264f4075 into e63fef19226345ad8ccd2aa532ca0d27(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:31,751 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:31,751 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/B, priority=13, startTime=1732141531675; duration=0sec 2024-11-20T22:25:31,751 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:31,751 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:B 2024-11-20T22:25:31,751 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:31,752 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:31,752 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 2f10f015cf26343913efd4f9264f4075/C is initiating minor compaction (all files) 2024-11-20T22:25:31,753 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2f10f015cf26343913efd4f9264f4075/C in TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:31,753 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/575347b5099a4590b69b9465ff98c8b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f7066c581b2e4550b1e0eb7cf2ec7ec3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/2d6425b9d17044dc867d7865e3e36b3f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp, totalSize=36.7 K 2024-11-20T22:25:31,753 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 575347b5099a4590b69b9465ff98c8b8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732141527207 2024-11-20T22:25:31,753 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f7066c581b2e4550b1e0eb7cf2ec7ec3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732141529539 2024-11-20T22:25:31,754 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d6425b9d17044dc867d7865e3e36b3f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732141530212 2024-11-20T22:25:31,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 
is added to blk_1073742290_1466 (size=4469) 2024-11-20T22:25:31,779 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#A#compaction#384 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:31,780 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b1840d95fb714ae99a1f1ddfd841e34e is 175, key is test_row_0/A:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:31,783 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2f10f015cf26343913efd4f9264f4075#C#compaction#386 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:31,784 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/fe1d08b0e4474800b23293de9362fe10 is 50, key is test_row_0/C:col10/1732141530212/Put/seqid=0 2024-11-20T22:25:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742292_1468 (size=32039) 2024-11-20T22:25:31,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742293_1469 (size=13085) 2024-11-20T22:25:31,824 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/b1840d95fb714ae99a1f1ddfd841e34e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b1840d95fb714ae99a1f1ddfd841e34e 2024-11-20T22:25:31,830 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/A of 2f10f015cf26343913efd4f9264f4075 into b1840d95fb714ae99a1f1ddfd841e34e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
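The figures driving the flush/compaction churn in these entries come from a few region-server settings: the 512.0 K blocking limit is the memstore flush size multiplied by the block multiplier, and the "16 blocking" count in the compaction-selection lines is hbase.hstore.blockingStoreFiles. A minimal configuration sketch, assuming a 128 KB flush size and the default multiplier of 4 (the actual values used by this test run are not visible in the log), could be:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class SmallMemstoreConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Assumed flush size of 128 KB; flush.size * block.multiplier gives the blocking
      // limit reported as "Over memstore limit=512.0 K" in the entries above.
      conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
      conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      // Store-file count at which flushes stall; matches the "16 blocking" figure
      // printed by SortedCompactionPolicy during compaction selection.
      conf.setInt("hbase.hstore.blockingStoreFiles", 16);
      System.out.println("memstore flush size = " + conf.get("hbase.hregion.memstore.flush.size"));
    }
  }
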
2024-11-20T22:25:31,830 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:31,831 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/A, priority=13, startTime=1732141531665; duration=0sec 2024-11-20T22:25:31,831 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,831 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:A 2024-11-20T22:25:31,857 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/fe1d08b0e4474800b23293de9362fe10 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fe1d08b0e4474800b23293de9362fe10 2024-11-20T22:25:31,865 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2f10f015cf26343913efd4f9264f4075/C of 2f10f015cf26343913efd4f9264f4075 into fe1d08b0e4474800b23293de9362fe10(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:31,865 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:31,865 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075., storeName=2f10f015cf26343913efd4f9264f4075/C, priority=13, startTime=1732141531678; duration=0sec 2024-11-20T22:25:31,865 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:31,865 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2f10f015cf26343913efd4f9264f4075:C 2024-11-20T22:25:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:31,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:25:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:31,910 DEBUG [Thread-1670 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40f02431 to 127.0.0.1:51916 2024-11-20T22:25:31,911 DEBUG [Thread-1670 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:31,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,911 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:31,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:31,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:31,918 DEBUG [Thread-1678 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07028425 to 127.0.0.1:51916 2024-11-20T22:25:31,918 DEBUG [Thread-1678 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:31,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112012e21772de1349d69af9f6bd168fb058_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141531909/Put/seqid=0 2024-11-20T22:25:31,920 DEBUG [Thread-1676 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4077c593 to 127.0.0.1:51916 2024-11-20T22:25:31,920 DEBUG [Thread-1676 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:31,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742294_1470 (size=12454) 2024-11-20T22:25:32,337 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:32,352 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112012e21772de1349d69af9f6bd168fb058_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112012e21772de1349d69af9f6bd168fb058_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:32,353 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/55a6aad5540d4a77919b13e7713332e2, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:32,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/55a6aad5540d4a77919b13e7713332e2 is 175, key is test_row_0/A:col10/1732141531909/Put/seqid=0 2024-11-20T22:25:32,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742295_1471 (size=31255) 2024-11-20T22:25:32,400 DEBUG [Thread-1674 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b466e6f to 127.0.0.1:51916 2024-11-20T22:25:32,400 DEBUG [Thread-1674 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:32,404 DEBUG [Thread-1672 
{}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40195d2e to 127.0.0.1:51916 2024-11-20T22:25:32,404 DEBUG [Thread-1672 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:32,404 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T22:25:32,404 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-11-20T22:25:32,404 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1315 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3945 rows 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1307 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3921 rows 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1318 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3954 rows 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1324 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3972 rows 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1315 2024-11-20T22:25:32,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3945 rows 2024-11-20T22:25:32,405 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:25:32,405 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c777cbc to 127.0.0.1:51916 2024-11-20T22:25:32,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:32,408 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T22:25:32,409 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T22:25:32,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:32,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:32,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141532412"}]},"ts":"1732141532412"} 2024-11-20T22:25:32,413 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T22:25:32,490 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to 
state=DISABLING 2024-11-20T22:25:32,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:25:32,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=113, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, UNASSIGN}] 2024-11-20T22:25:32,492 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=114, ppid=113, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, UNASSIGN 2024-11-20T22:25:32,493 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=114 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:32,494 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:25:32,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; CloseRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:25:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:32,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:32,645 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] handler.UnassignRegionHandler(124): Close 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:32,645 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:25:32,645 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1681): Closing 2f10f015cf26343913efd4f9264f4075, disabling compactions & flushes 2024-11-20T22:25:32,645 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
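The disable request logged above ("Client=jenkins//172.17.0.2 disable TestAcidGuarantees") is what a test tear-down issues through the Admin API; the master then runs DisableTableProcedure, CloseTableRegionsProcedure, and CloseRegionProcedure as shown. A minimal sketch of the client side (connection settings assumed, not taken from this log):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DisableTableSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        if (admin.isTableEnabled(table)) {
          // Kicks off the DisableTableProcedure seen above: the master marks the table
          // DISABLING in hbase:meta, then closes each region via CloseRegionProcedure.
          admin.disableTable(table);
        }
        // A full tear-down would normally follow with admin.deleteTable(table).
      }
    }
  }
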
2024-11-20T22:25:32,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:32,761 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=361, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/55a6aad5540d4a77919b13e7713332e2 2024-11-20T22:25:32,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/36e73a93956d4df3834e71b65b5bdea2 is 50, key is test_row_0/B:col10/1732141531909/Put/seqid=0 2024-11-20T22:25:32,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742296_1472 (size=12301) 2024-11-20T22:25:32,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/36e73a93956d4df3834e71b65b5bdea2 2024-11-20T22:25:32,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/07c75e033e744b549e28ce4ca9dd1a62 is 50, key is test_row_0/C:col10/1732141531909/Put/seqid=0 2024-11-20T22:25:32,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742297_1473 (size=12301) 2024-11-20T22:25:32,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/07c75e033e744b549e28ce4ca9dd1a62 2024-11-20T22:25:32,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/55a6aad5540d4a77919b13e7713332e2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/55a6aad5540d4a77919b13e7713332e2 2024-11-20T22:25:32,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/55a6aad5540d4a77919b13e7713332e2, entries=150, sequenceid=361, filesize=30.5 K 2024-11-20T22:25:32,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/36e73a93956d4df3834e71b65b5bdea2 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/36e73a93956d4df3834e71b65b5bdea2 2024-11-20T22:25:32,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/36e73a93956d4df3834e71b65b5bdea2, entries=150, sequenceid=361, filesize=12.0 K 2024-11-20T22:25:32,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/07c75e033e744b549e28ce4ca9dd1a62 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/07c75e033e744b549e28ce4ca9dd1a62 2024-11-20T22:25:32,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/07c75e033e744b549e28ce4ca9dd1a62, entries=150, sequenceid=361, filesize=12.0 K 2024-11-20T22:25:32,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=26.84 KB/27480 for 2f10f015cf26343913efd4f9264f4075 in 916ms, sequenceid=361, compaction requested=false 2024-11-20T22:25:32,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:32,826 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 2024-11-20T22:25:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. after waiting 0 ms 2024-11-20T22:25:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
2024-11-20T22:25:32,826 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(2837): Flushing 2f10f015cf26343913efd4f9264f4075 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:25:32,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=A 2024-11-20T22:25:32,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:32,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=B 2024-11-20T22:25:32,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:32,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2f10f015cf26343913efd4f9264f4075, store=C 2024-11-20T22:25:32,827 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:32,842 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204fdf3a5f3d204f54b053484f85ecda07_2f10f015cf26343913efd4f9264f4075 is 50, key is test_row_0/A:col10/1732141532403/Put/seqid=0 2024-11-20T22:25:32,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742298_1474 (size=9914) 2024-11-20T22:25:32,863 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:32,868 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204fdf3a5f3d204f54b053484f85ecda07_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204fdf3a5f3d204f54b053484f85ecda07_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:32,869 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c3277729ba9a4e4d80c8754a6bf2eff3, store: [table=TestAcidGuarantees family=A region=2f10f015cf26343913efd4f9264f4075] 2024-11-20T22:25:32,869 DEBUG 
[RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c3277729ba9a4e4d80c8754a6bf2eff3 is 175, key is test_row_0/A:col10/1732141532403/Put/seqid=0 2024-11-20T22:25:32,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742299_1475 (size=22561) 2024-11-20T22:25:32,882 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=368, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c3277729ba9a4e4d80c8754a6bf2eff3 2024-11-20T22:25:32,890 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/c96abfa7913b45fa84f0b2c468a3cf2b is 50, key is test_row_0/B:col10/1732141532403/Put/seqid=0 2024-11-20T22:25:32,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742300_1476 (size=9857) 2024-11-20T22:25:33,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:33,294 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/c96abfa7913b45fa84f0b2c468a3cf2b 2024-11-20T22:25:33,308 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b88be397e2f245f698339b945f09fb89 is 50, key is test_row_0/C:col10/1732141532403/Put/seqid=0 2024-11-20T22:25:33,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742301_1477 (size=9857) 2024-11-20T22:25:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:33,713 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b88be397e2f245f698339b945f09fb89 2024-11-20T22:25:33,717 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/A/c3277729ba9a4e4d80c8754a6bf2eff3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c3277729ba9a4e4d80c8754a6bf2eff3 2024-11-20T22:25:33,720 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c3277729ba9a4e4d80c8754a6bf2eff3, entries=100, sequenceid=368, filesize=22.0 K 2024-11-20T22:25:33,721 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/B/c96abfa7913b45fa84f0b2c468a3cf2b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c96abfa7913b45fa84f0b2c468a3cf2b 2024-11-20T22:25:33,724 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c96abfa7913b45fa84f0b2c468a3cf2b, entries=100, sequenceid=368, filesize=9.6 K 2024-11-20T22:25:33,725 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/.tmp/C/b88be397e2f245f698339b945f09fb89 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b88be397e2f245f698339b945f09fb89 2024-11-20T22:25:33,728 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b88be397e2f245f698339b945f09fb89, entries=100, sequenceid=368, filesize=9.6 K 2024-11-20T22:25:33,728 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 2f10f015cf26343913efd4f9264f4075 in 902ms, sequenceid=368, compaction requested=true 2024-11-20T22:25:33,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/68cd797a3ac6467d90d6e11fb0da4566, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c277024ff13248b2a0f221939aeae81b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/84aefa4a8d2a477ba61cce859787f4cd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f1aa112565e04d2daac44555676050ef, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/20ea8a94ce9b4411bdb6887308fdeb25, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f40bf80478dc472a820ac37ad8cf8c57, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7] to archive 2024-11-20T22:25:33,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:33,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f 2024-11-20T22:25:33,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/6f10171f76ee4e5ab086be66a7a41ef0 2024-11-20T22:25:33,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/ac55b983def940df9aba05a7fb944c22 2024-11-20T22:25:33,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/68cd797a3ac6467d90d6e11fb0da4566 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/68cd797a3ac6467d90d6e11fb0da4566 2024-11-20T22:25:33,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/54b1e87320184af2afd728a37914eb04 2024-11-20T22:25:33,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b564f774f9b541cdb28ad7d59e8cb9d1 2024-11-20T22:25:33,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c277024ff13248b2a0f221939aeae81b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c277024ff13248b2a0f221939aeae81b 2024-11-20T22:25:33,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/01940ff525ed40db85755e69c48cc29c 2024-11-20T22:25:33,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fb52741587f34b73b2b8808848571214 2024-11-20T22:25:33,743 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/84aefa4a8d2a477ba61cce859787f4cd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/84aefa4a8d2a477ba61cce859787f4cd 2024-11-20T22:25:33,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/500131a98eec427e8997ef5a3cb19278 2024-11-20T22:25:33,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/fc58fd30fe0b405abcf6ecb358f2d290 2024-11-20T22:25:33,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f1aa112565e04d2daac44555676050ef to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f1aa112565e04d2daac44555676050ef 2024-11-20T22:25:33,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/59194c5ac65241a6af2ca2c87e07c5b6 2024-11-20T22:25:33,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/a484817be80c461ea4b55a7fbe97c640 2024-11-20T22:25:33,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/20ea8a94ce9b4411bdb6887308fdeb25 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/20ea8a94ce9b4411bdb6887308fdeb25 2024-11-20T22:25:33,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/40c33c47c6a1419eabc4145442c06a32 2024-11-20T22:25:33,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c4e866077f8e4b17b6b84e5bc8d6ea89 2024-11-20T22:25:33,753 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c5640c1883e149518f3e57e29bf881a0 2024-11-20T22:25:33,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/04b389c8358243608b028727ac1937a3 2024-11-20T22:25:33,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f40bf80478dc472a820ac37ad8cf8c57 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f40bf80478dc472a820ac37ad8cf8c57 2024-11-20T22:25:33,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b3345d5d5b6b4d53a47ec5623a36e461 2024-11-20T22:25:33,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/bd7a4f31cb2744de9b3ba224e8b637b7 2024-11-20T22:25:33,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/95db2d6419724d57b92abd773d7bdb05, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/12a91dd2903a414e9fe98b71cd5cb409, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/340712cf9b72405187938d72aaefd634, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a8f88efb087e400ab8416c2a50c88539, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a94f044e2ddc4a719e970236bd4ba38b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/4880bd7b53fa43e2b616f0b766bf490b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/d517e258f8e94f3f8e435dedc61b60c7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a41b1a67d53146c199d6d3f351fae93d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/9675f95691b64604999fcf732a026200, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/ee0c4892408047b8a34526b2c0935f18, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/1cf6f51712554db2b07cb5f0cf2cd275, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/61d85a3085fc458bb2d0b93de3bc8c3b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/2a76dacc5334432c8192cb18c817705c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6fe58802982042e08220365c721494a5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/0d9eb3c785c443ae8701415e5f559017, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/7e51dbe8b91b4fb39bfc3bd5568d82ba, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/dcbfc89ac2ea44dab0745e77587eb48d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e8bed5b379da4419a780bd8ba2cfe147, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a08af0c261ff49b293a7dce8483498f8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6620e76e21a049e3befd474ab3d77bfa, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/93ccf3b64e944ef9a277a6f33d30a80f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c44682b5f1b9492c874ebfdad5ec1094, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/231ebdbdfa1a47059af9567abce61c08] to archive 2024-11-20T22:25:33,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:33,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/95db2d6419724d57b92abd773d7bdb05 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/95db2d6419724d57b92abd773d7bdb05 2024-11-20T22:25:33,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/12a91dd2903a414e9fe98b71cd5cb409 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/12a91dd2903a414e9fe98b71cd5cb409 2024-11-20T22:25:33,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/340712cf9b72405187938d72aaefd634 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/340712cf9b72405187938d72aaefd634 2024-11-20T22:25:33,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a8f88efb087e400ab8416c2a50c88539 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a8f88efb087e400ab8416c2a50c88539 2024-11-20T22:25:33,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a94f044e2ddc4a719e970236bd4ba38b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a94f044e2ddc4a719e970236bd4ba38b 2024-11-20T22:25:33,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/4880bd7b53fa43e2b616f0b766bf490b to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/4880bd7b53fa43e2b616f0b766bf490b 2024-11-20T22:25:33,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/d517e258f8e94f3f8e435dedc61b60c7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/d517e258f8e94f3f8e435dedc61b60c7 2024-11-20T22:25:33,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a41b1a67d53146c199d6d3f351fae93d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a41b1a67d53146c199d6d3f351fae93d 2024-11-20T22:25:33,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/9675f95691b64604999fcf732a026200 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/9675f95691b64604999fcf732a026200 2024-11-20T22:25:33,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/ee0c4892408047b8a34526b2c0935f18 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/ee0c4892408047b8a34526b2c0935f18 2024-11-20T22:25:33,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/1cf6f51712554db2b07cb5f0cf2cd275 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/1cf6f51712554db2b07cb5f0cf2cd275 2024-11-20T22:25:33,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/61d85a3085fc458bb2d0b93de3bc8c3b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/61d85a3085fc458bb2d0b93de3bc8c3b 2024-11-20T22:25:33,771 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/2a76dacc5334432c8192cb18c817705c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/2a76dacc5334432c8192cb18c817705c 2024-11-20T22:25:33,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6fe58802982042e08220365c721494a5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6fe58802982042e08220365c721494a5 2024-11-20T22:25:33,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/0d9eb3c785c443ae8701415e5f559017 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/0d9eb3c785c443ae8701415e5f559017 2024-11-20T22:25:33,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/7e51dbe8b91b4fb39bfc3bd5568d82ba to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/7e51dbe8b91b4fb39bfc3bd5568d82ba 2024-11-20T22:25:33,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/dcbfc89ac2ea44dab0745e77587eb48d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/dcbfc89ac2ea44dab0745e77587eb48d 2024-11-20T22:25:33,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e8bed5b379da4419a780bd8ba2cfe147 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e8bed5b379da4419a780bd8ba2cfe147 2024-11-20T22:25:33,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a08af0c261ff49b293a7dce8483498f8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/a08af0c261ff49b293a7dce8483498f8 2024-11-20T22:25:33,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6620e76e21a049e3befd474ab3d77bfa to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/6620e76e21a049e3befd474ab3d77bfa 2024-11-20T22:25:33,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/93ccf3b64e944ef9a277a6f33d30a80f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/93ccf3b64e944ef9a277a6f33d30a80f 2024-11-20T22:25:33,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c44682b5f1b9492c874ebfdad5ec1094 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c44682b5f1b9492c874ebfdad5ec1094 2024-11-20T22:25:33,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/231ebdbdfa1a47059af9567abce61c08 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/231ebdbdfa1a47059af9567abce61c08 2024-11-20T22:25:33,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/e4b3ea594e8040f799d91a1646c8a929, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/092577cca0c941908f61128a39999a79, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b2e09096ee8f437783d4518a7605eeb2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/68fe8863067044e2b5690368c23c12ca, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/abe509720d9648098642529de58a700f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fa2765e5d41147fc9af083695fb409e2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/81b2cd9c005e4741a544933bf09be970, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/101f7056daf549acb4cf9a5f4101fba0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b8bba32c56a14bdcbd89fe686d9dcca9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b1410ec42af24a039e601812b77b77b9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/232aea56b9ad421bb1f993a605142024, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/9d51db45bf424689b88d6a2b595667eb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/adc38507df7b495e84594a8597e9f086, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/146c2a5c46ba4dfeb93b792cdebf434a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/34d90e356ac8482ebed5484323ba7046, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f13e5621fa124cc78e286d8c0139e4c9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/d130131fa62b4dfaaf7add0429f0e793, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/01b2517f9218408cae287efdfd935e5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/4ed4874e43054e58a1c90374153e2812, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/575347b5099a4590b69b9465ff98c8b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/0c415a04481544cea5e5ec3e41a579ad, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f7066c581b2e4550b1e0eb7cf2ec7ec3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/2d6425b9d17044dc867d7865e3e36b3f] to archive 2024-11-20T22:25:33,791 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:25:33,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/e4b3ea594e8040f799d91a1646c8a929 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/e4b3ea594e8040f799d91a1646c8a929 2024-11-20T22:25:33,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/092577cca0c941908f61128a39999a79 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/092577cca0c941908f61128a39999a79 2024-11-20T22:25:33,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b2e09096ee8f437783d4518a7605eeb2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b2e09096ee8f437783d4518a7605eeb2 2024-11-20T22:25:33,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/68fe8863067044e2b5690368c23c12ca to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/68fe8863067044e2b5690368c23c12ca 2024-11-20T22:25:33,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/abe509720d9648098642529de58a700f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/abe509720d9648098642529de58a700f 2024-11-20T22:25:33,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fa2765e5d41147fc9af083695fb409e2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fa2765e5d41147fc9af083695fb409e2 2024-11-20T22:25:33,798 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/81b2cd9c005e4741a544933bf09be970 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/81b2cd9c005e4741a544933bf09be970 2024-11-20T22:25:33,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/101f7056daf549acb4cf9a5f4101fba0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/101f7056daf549acb4cf9a5f4101fba0 2024-11-20T22:25:33,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b8bba32c56a14bdcbd89fe686d9dcca9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b8bba32c56a14bdcbd89fe686d9dcca9 2024-11-20T22:25:33,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b1410ec42af24a039e601812b77b77b9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b1410ec42af24a039e601812b77b77b9 2024-11-20T22:25:33,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/232aea56b9ad421bb1f993a605142024 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/232aea56b9ad421bb1f993a605142024 2024-11-20T22:25:33,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/9d51db45bf424689b88d6a2b595667eb to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/9d51db45bf424689b88d6a2b595667eb 2024-11-20T22:25:33,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/adc38507df7b495e84594a8597e9f086 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/adc38507df7b495e84594a8597e9f086 2024-11-20T22:25:33,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/146c2a5c46ba4dfeb93b792cdebf434a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/146c2a5c46ba4dfeb93b792cdebf434a 2024-11-20T22:25:33,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/34d90e356ac8482ebed5484323ba7046 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/34d90e356ac8482ebed5484323ba7046 2024-11-20T22:25:33,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f13e5621fa124cc78e286d8c0139e4c9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f13e5621fa124cc78e286d8c0139e4c9 2024-11-20T22:25:33,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/d130131fa62b4dfaaf7add0429f0e793 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/d130131fa62b4dfaaf7add0429f0e793 2024-11-20T22:25:33,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/01b2517f9218408cae287efdfd935e5a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/01b2517f9218408cae287efdfd935e5a 2024-11-20T22:25:33,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/4ed4874e43054e58a1c90374153e2812 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/4ed4874e43054e58a1c90374153e2812 2024-11-20T22:25:33,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/575347b5099a4590b69b9465ff98c8b8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/575347b5099a4590b69b9465ff98c8b8 2024-11-20T22:25:33,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/0c415a04481544cea5e5ec3e41a579ad to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/0c415a04481544cea5e5ec3e41a579ad 2024-11-20T22:25:33,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f7066c581b2e4550b1e0eb7cf2ec7ec3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/f7066c581b2e4550b1e0eb7cf2ec7ec3 2024-11-20T22:25:33,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/2d6425b9d17044dc867d7865e3e36b3f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/2d6425b9d17044dc867d7865e3e36b3f 2024-11-20T22:25:33,825 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/recovered.edits/371.seqid, newMaxSeqId=371, maxSeqId=4 2024-11-20T22:25:33,825 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075. 
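Annotation: the StoreCloser entries above all follow one pattern: a compacted store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the matching location under .../archive/data/default/..., i.e. the archive path is the data path with an "archive/" prefix on the relative part, and the file is moved rather than deleted. The sketch below is only an illustration of that layout using the public Hadoop FileSystem API; it is not HBase's HFileArchiver code (which additionally handles retries, name collisions and bulk moves). The class name is mine; the root and file name are copied from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  public static void main(String[] args) throws Exception {
    // Cluster root and one store file, taken verbatim from the entries above.
    String root = "hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72";
    String storeFile = "data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/f570b0331a374ffa9d5de990b34e506f";

    Path source = new Path(root, storeFile);               // live location under data/
    Path target = new Path(root, "archive/" + storeFile);  // same table/region/family layout under archive/

    Configuration conf = new Configuration();
    FileSystem fs = source.getFileSystem(conf);
    fs.mkdirs(target.getParent());                         // archive directories are created on demand
    boolean moved = fs.rename(source, target);             // moved, not deleted, so snapshots/backups that
                                                           // still reference the file keep resolving it
    System.out.println("archived=" + moved + " -> " + target);
  }
}

In the real code path a cleaner chore later removes archived files once nothing references them; the sketch stops at the rename.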
2024-11-20T22:25:33,825 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] regionserver.HRegion(1635): Region close journal for 2f10f015cf26343913efd4f9264f4075: 2024-11-20T22:25:33,827 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=115}] handler.UnassignRegionHandler(170): Closed 2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:33,827 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=114 updating hbase:meta row=2f10f015cf26343913efd4f9264f4075, regionState=CLOSED 2024-11-20T22:25:33,829 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T22:25:33,829 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; CloseRegionProcedure 2f10f015cf26343913efd4f9264f4075, server=6365a1e51efd,46811,1732141422048 in 1.3340 sec 2024-11-20T22:25:33,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=113 2024-11-20T22:25:33,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=113, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2f10f015cf26343913efd4f9264f4075, UNASSIGN in 1.3370 sec 2024-11-20T22:25:33,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-20T22:25:33,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.3400 sec 2024-11-20T22:25:33,833 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141533833"}]},"ts":"1732141533833"} 2024-11-20T22:25:33,834 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:25:33,842 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:25:33,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4340 sec 2024-11-20T22:25:34,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T22:25:34,519 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-20T22:25:34,519 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:25:34,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,521 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=116, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:34,521 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=116, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,527 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,535 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/recovered.edits] 2024-11-20T22:25:34,537 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/55a6aad5540d4a77919b13e7713332e2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/55a6aad5540d4a77919b13e7713332e2 2024-11-20T22:25:34,538 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b1840d95fb714ae99a1f1ddfd841e34e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/b1840d95fb714ae99a1f1ddfd841e34e 2024-11-20T22:25:34,539 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c3277729ba9a4e4d80c8754a6bf2eff3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/A/c3277729ba9a4e4d80c8754a6bf2eff3 2024-11-20T22:25:34,544 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/36e73a93956d4df3834e71b65b5bdea2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/36e73a93956d4df3834e71b65b5bdea2 2024-11-20T22:25:34,545 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c96abfa7913b45fa84f0b2c468a3cf2b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/c96abfa7913b45fa84f0b2c468a3cf2b 
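Annotation: the DISABLE reported completed above (procId 112) and the delete stored as pid=116 correspond to two plain Admin calls issued by the test client. A minimal client-side equivalent is sketched below; it assumes a cluster reachable through the default HBaseConfiguration on the classpath, and the class name is mine rather than anything from the test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Disabling drives the UNASSIGN/CloseRegionProcedure chain logged earlier
      // (final memstore flush, archiving of compacted files, region close).
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      // Deleting then archives the remaining store files and removes the region
      // directory, as the DeleteTableProcedure entries around this point show.
      admin.deleteTable(table);
    }
  }
}

The order matters: the table must be disabled first, otherwise deleteTable fails with TableNotDisabledException.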
2024-11-20T22:25:34,546 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e63fef19226345ad8ccd2aa532ca0d27 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/B/e63fef19226345ad8ccd2aa532ca0d27 2024-11-20T22:25:34,556 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/07c75e033e744b549e28ce4ca9dd1a62 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/07c75e033e744b549e28ce4ca9dd1a62 2024-11-20T22:25:34,573 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b88be397e2f245f698339b945f09fb89 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/b88be397e2f245f698339b945f09fb89 2024-11-20T22:25:34,574 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fe1d08b0e4474800b23293de9362fe10 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/C/fe1d08b0e4474800b23293de9362fe10 2024-11-20T22:25:34,580 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/recovered.edits/371.seqid to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075/recovered.edits/371.seqid 2024-11-20T22:25:34,581 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,581 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:25:34,581 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:25:34,584 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T22:25:34,588 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200602ebe1ee2a41d988dbdf8e5bc11577_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200602ebe1ee2a41d988dbdf8e5bc11577_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,589 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112012e21772de1349d69af9f6bd168fb058_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112012e21772de1349d69af9f6bd168fb058_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,590 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112015f0f098b6674f86a40ecb6a27b078c6_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112015f0f098b6674f86a40ecb6a27b078c6_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,592 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112017b87ab77317403b95a7c1d5485f79f8_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112017b87ab77317403b95a7c1d5485f79f8_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,593 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202295527450b14e7a825efdaff1db5cd0_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202295527450b14e7a825efdaff1db5cd0_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,594 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204fdf3a5f3d204f54b053484f85ecda07_2f10f015cf26343913efd4f9264f4075 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204fdf3a5f3d204f54b053484f85ecda07_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,595 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052c456df70164a4688f467ca5869c375_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052c456df70164a4688f467ca5869c375_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,598 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120649f5bea536b4727b800a9af4bbb8b3d_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120649f5bea536b4727b800a9af4bbb8b3d_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,599 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112072dbcc014f0440de95aa1c71d9127b71_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112072dbcc014f0440de95aa1c71d9127b71_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,601 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207e108d7e4bde42f8a2d2f2dd86093238_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207e108d7e4bde42f8a2d2f2dd86093238_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,602 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120967f7f42f592425fbd4faaab57f7a05b_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120967f7f42f592425fbd4faaab57f7a05b_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,606 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209a9880b4887d40178b8d099e636f68bb_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209a9880b4887d40178b8d099e636f68bb_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,619 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209f274dde2fb9424587f1ae38d29057d7_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209f274dde2fb9424587f1ae38d29057d7_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:34,628 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4d9bbb7fff144d48fe5a76e0c56a9a0_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4d9bbb7fff144d48fe5a76e0c56a9a0_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,632 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b7310864a2624a0c92d005a9b1b5bbc4_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b7310864a2624a0c92d005a9b1b5bbc4_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,633 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cec06a3d192a4c86946f8c19caa682c9_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cec06a3d192a4c86946f8c19caa682c9_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,635 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d08f9eb6c52c46768312bcd565db2045_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d08f9eb6c52c46768312bcd565db2045_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,637 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d16b868aec564e1eaf44fd4737c41127_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d16b868aec564e1eaf44fd4737c41127_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,640 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f4d085cad3de4757804148ddc937133c_2f10f015cf26343913efd4f9264f4075 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f4d085cad3de4757804148ddc937133c_2f10f015cf26343913efd4f9264f4075 2024-11-20T22:25:34,640 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:25:34,642 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=116, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,644 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:25:34,646 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:25:34,647 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=116, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,647 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
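The mobdir/.../A/... files archived above are MOB (medium object) files written for column family A of the dropped region, and they are relocated under archive/ exactly like ordinary store files. The test code itself is not part of this log; assuming the family was MOB-enabled (which the mobdir paths and the testMobScanAtomicity name suggest), a family descriptor of that kind can be built with the HBase 2.x client API roughly as follows. The 10 KB threshold is an illustrative assumption, not a value taken from this log.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MobFamilyExample {
      // Sketch of a MOB-enabled family like the 'A' family whose files appear under mobdir/ above.
      static ColumnFamilyDescriptor mobFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)          // write oversized cells as separate MOB files
            .setMobThreshold(10 * 1024)   // illustrative threshold: larger cells go to mobdir/
            .setMaxVersions(1)
            .build();
      }
    }
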
2024-11-20T22:25:34,647 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141534647"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:34,652 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:25:34,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2f10f015cf26343913efd4f9264f4075, NAME => 'TestAcidGuarantees,,1732141509006.2f10f015cf26343913efd4f9264f4075.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:25:34,652 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:25:34,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141534652"}]},"ts":"9223372036854775807"} 2024-11-20T22:25:34,660 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:25:34,668 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=116, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 149 msec 2024-11-20T22:25:34,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T22:25:34,823 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T22:25:34,831 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=460 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1046 (was 1099), ProcessCount=11 (was 11), AvailableMemoryMB=1746 (was 2128) 2024-11-20T22:25:34,842 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=1046, ProcessCount=11, AvailableMemoryMB=1746 2024-11-20T22:25:34,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
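Procedures 112 (DisableTableProcedure) and 116 (DeleteTableProcedure) in the entries above were submitted by the test client; the repeated "Checking to see if procedure is done" lines are HBaseAdmin$TableFuture polling the master until each completes. A minimal sketch of the equivalent client calls, assuming a Configuration that points at this mini-cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for this cluster
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(table); // blocks until the DisableTableProcedure (pid=112 above) finishes
          admin.deleteTable(table);  // blocks until the DeleteTableProcedure (pid=116 above) finishes
        }
      }
    }
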
2024-11-20T22:25:34,844 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:25:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=117, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:25:34,851 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=117, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:25:34,851 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:34,852 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 117 2024-11-20T22:25:34,852 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=117, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:25:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=117 2024-11-20T22:25:34,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742302_1478 (size=963) 2024-11-20T22:25:34,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=117 2024-11-20T22:25:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=117 2024-11-20T22:25:35,259 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:25:35,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742303_1479 (size=53) 2024-11-20T22:25:35,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=117 2024-11-20T22:25:35,669 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:35,669 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 62dd6d22774f5784522279eafe291710, disabling compactions & flushes 2024-11-20T22:25:35,669 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:35,669 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:35,669 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. after waiting 0 ms 2024-11-20T22:25:35,669 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:35,669 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
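The create request logged at 22:25:34,844 carries the full descriptor: the table-level attribute hbase.hregion.compacting.memstore.type = ADAPTIVE plus three identical families A, B and C with VERSIONS = 1, and, per the TableDescriptorChecker warning above, a 128 KB MEMSTORE_FLUSHSIZE. The test's own source is not in this log, so the following is a hedged reconstruction of that descriptor with the HBase 2.x client API; the per-family alternative for the compaction policy would be ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAdaptiveTableExample {
      // Reconstruction of the descriptor shown in the create log entry above.
      static TableDescriptor descriptor() {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table attribute visible in the logged descriptor
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                // 131072 bytes, the value the TableDescriptorChecker warns about
                .setMemStoreFlushSize(128 * 1024);
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                  .build());
        }
        return builder.build();
      }

      static void create(Admin admin) throws java.io.IOException {
        admin.createTable(descriptor()); // blocks until CreateTableProcedure (pid=117 above) completes
      }
    }
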
2024-11-20T22:25:35,669 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:35,671 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=117, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:25:35,671 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141535671"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141535671"}]},"ts":"1732141535671"} 2024-11-20T22:25:35,672 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T22:25:35,673 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=117, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:25:35,673 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141535673"}]},"ts":"1732141535673"} 2024-11-20T22:25:35,677 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:25:35,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, ASSIGN}] 2024-11-20T22:25:35,694 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, ASSIGN 2024-11-20T22:25:35,694 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:25:35,848 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=62dd6d22774f5784522279eafe291710, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:35,850 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; OpenRegionProcedure 62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:25:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=117 2024-11-20T22:25:36,002 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:36,004 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:36,005 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(7285): Opening region: {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:25:36,005 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,005 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:25:36,005 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(7327): checking encryption for 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,005 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(7330): checking classloading for 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,006 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,007 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:36,007 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62dd6d22774f5784522279eafe291710 columnFamilyName A 2024-11-20T22:25:36,007 DEBUG [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:36,008 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.HStore(327): Store=62dd6d22774f5784522279eafe291710/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:36,008 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,008 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:36,009 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62dd6d22774f5784522279eafe291710 columnFamilyName B 2024-11-20T22:25:36,009 DEBUG [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:36,009 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.HStore(327): Store=62dd6d22774f5784522279eafe291710/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:36,009 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,010 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:25:36,010 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62dd6d22774f5784522279eafe291710 columnFamilyName C 2024-11-20T22:25:36,010 DEBUG [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:25:36,011 INFO [StoreOpener-62dd6d22774f5784522279eafe291710-1 {}] regionserver.HStore(327): Store=62dd6d22774f5784522279eafe291710/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:25:36,011 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:36,011 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,012 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,013 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:25:36,013 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(1085): writing seq id for 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:36,015 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:25:36,015 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(1102): Opened 62dd6d22774f5784522279eafe291710; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74940754, jitterRate=0.11670425534248352}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:25:36,016 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegion(1001): Region open journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:36,017 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., pid=119, masterSystemTime=1732141536002 2024-11-20T22:25:36,018 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:36,018 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=119}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
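The CompactionConfiguration(181) lines above print the effective compaction tuning per family: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), a 2684354560-byte throttle point, and a 7-day major-compaction period with 0.5 jitter. These look like the stock defaults; to my knowledge they correspond to the standard hbase-site.xml keys below, shown here programmatically as a sketch with the values copied from the log (the property names are assumptions based on common HBase configuration, not printed in these entries).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningExample {
      // Mirrors the values printed by CompactionConfiguration(181) above.
      static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
        return conf;
      }
    }
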
2024-11-20T22:25:36,018 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=62dd6d22774f5784522279eafe291710, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:36,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T22:25:36,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; OpenRegionProcedure 62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 in 169 msec 2024-11-20T22:25:36,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-11-20T22:25:36,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, ASSIGN in 327 msec 2024-11-20T22:25:36,022 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=117, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:25:36,023 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141536022"}]},"ts":"1732141536022"} 2024-11-20T22:25:36,023 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:25:36,035 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=117, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:25:36,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1910 sec 2024-11-20T22:25:36,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=117 2024-11-20T22:25:36,957 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 117 completed 2024-11-20T22:25:36,958 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67f4ca2c to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e58a318 2024-11-20T22:25:37,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c2fc32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,044 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,045 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,045 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:25:37,046 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53170, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:25:37,048 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f31bf28 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44ea87b5 2024-11-20T22:25:37,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52dfc076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,064 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3338bc39 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@23ff52c4 2024-11-20T22:25:37,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f8feabd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,094 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0cc902d0 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55f908ff 2024-11-20T22:25:37,124 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2553caff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,125 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e6756a1 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33c40d62 2024-11-20T22:25:37,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8fb2dd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,136 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167e1f2a to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@542bf9a 2024-11-20T22:25:37,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5486bf70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,151 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ab073d to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b759e7 2024-11-20T22:25:37,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3772a4ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,183 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51c10dfb to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e861e45 2024-11-20T22:25:37,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c73305b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,203 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x039117ee to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5dbb55ef 2024-11-20T22:25:37,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@81f47eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,224 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a895ecf to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d1a7a5c 2024-11-20T22:25:37,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc8480b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,243 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ee1e5bc to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@97b0a6 2024-11-20T22:25:37,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dc238af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:25:37,288 DEBUG [hconnection-0x7f92839c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,288 DEBUG [hconnection-0x6cb11359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,289 DEBUG [hconnection-0x4d09a06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,289 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,294 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,295 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:37,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:37,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:37,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:37,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:37,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:37,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:37,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:37,314 DEBUG [hconnection-0x8608415-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,315 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,316 DEBUG [hconnection-0x10c84c72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,317 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141597320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141597321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141597321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141597321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,322 DEBUG [hconnection-0x3c40b238-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,323 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33074, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,324 DEBUG [hconnection-0x752a7e4e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,325 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,332 DEBUG [hconnection-0xb3f5384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,339 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141597340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,343 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:37,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-20T22:25:37,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:37,345 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:37,346 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:37,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:37,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/47fbfd5811294bce8b25956c7308e3da is 50, key is test_row_0/A:col10/1732141537303/Put/seqid=0 2024-11-20T22:25:37,357 DEBUG [hconnection-0x1b9962ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,359 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,375 DEBUG [hconnection-0x490901f3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:25:37,376 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:25:37,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39471 is added to blk_1073742304_1480 (size=12001) 2024-11-20T22:25:37,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/47fbfd5811294bce8b25956c7308e3da 2024-11-20T22:25:37,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141597422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141597422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141597422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141597422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:37,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141597447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/452e1540fd4f437ca689915c3baaad5a is 50, key is test_row_0/B:col10/1732141537303/Put/seqid=0 2024-11-20T22:25:37,498 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T22:25:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742305_1481 (size=12001) 2024-11-20T22:25:37,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/452e1540fd4f437ca689915c3baaad5a 2024-11-20T22:25:37,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141597628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141597629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141597627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141597633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/ed5e140268c541b9a5bdc1cab13b33bf is 50, key is test_row_0/C:col10/1732141537303/Put/seqid=0 2024-11-20T22:25:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:37,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141597650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T22:25:37,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:37,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:37,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742306_1482 (size=12001) 2024-11-20T22:25:37,809 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T22:25:37,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,810 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141597931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141597934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141597935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141597935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:37,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141597954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,962 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:37,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T22:25:37,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:37,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:37,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:37,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,124 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T22:25:38,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:38,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:38,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:38,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:38,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/ed5e140268c541b9a5bdc1cab13b33bf 2024-11-20T22:25:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/47fbfd5811294bce8b25956c7308e3da as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/47fbfd5811294bce8b25956c7308e3da 2024-11-20T22:25:38,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/47fbfd5811294bce8b25956c7308e3da, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T22:25:38,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/452e1540fd4f437ca689915c3baaad5a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/452e1540fd4f437ca689915c3baaad5a 2024-11-20T22:25:38,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/452e1540fd4f437ca689915c3baaad5a, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T22:25:38,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/ed5e140268c541b9a5bdc1cab13b33bf as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ed5e140268c541b9a5bdc1cab13b33bf 2024-11-20T22:25:38,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ed5e140268c541b9a5bdc1cab13b33bf, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T22:25:38,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 62dd6d22774f5784522279eafe291710 in 895ms, sequenceid=13, compaction requested=false 2024-11-20T22:25:38,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:38,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=121 2024-11-20T22:25:38,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:38,286 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:38,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:38,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:38,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:38,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:38,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:38,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:38,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/781f32b6ed934268a6e591bfeec1cd2f is 50, key is test_row_0/A:col10/1732141537320/Put/seqid=0 2024-11-20T22:25:38,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742307_1483 (size=12001) 2024-11-20T22:25:38,355 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/781f32b6ed934268a6e591bfeec1cd2f 2024-11-20T22:25:38,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/db7f6bc260084d86b830c159542fc050 is 50, key is test_row_0/B:col10/1732141537320/Put/seqid=0 2024-11-20T22:25:38,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742308_1484 (size=12001) 2024-11-20T22:25:38,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:38,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:38,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141598452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141598453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141598456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141598457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141598463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141598563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141598563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141598568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141598572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141598771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141598773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141598777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:38,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141598781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:38,833 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/db7f6bc260084d86b830c159542fc050 2024-11-20T22:25:38,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/f610b617f8fc407a90c2529dd6ef7284 is 50, key is test_row_0/C:col10/1732141537320/Put/seqid=0 2024-11-20T22:25:38,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742309_1485 (size=12001) 2024-11-20T22:25:39,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141599077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141599081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141599085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141599087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,163 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T22:25:39,351 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/f610b617f8fc407a90c2529dd6ef7284 2024-11-20T22:25:39,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/781f32b6ed934268a6e591bfeec1cd2f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/781f32b6ed934268a6e591bfeec1cd2f 2024-11-20T22:25:39,377 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/781f32b6ed934268a6e591bfeec1cd2f, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:25:39,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/db7f6bc260084d86b830c159542fc050 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/db7f6bc260084d86b830c159542fc050 2024-11-20T22:25:39,390 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:25:39,391 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/db7f6bc260084d86b830c159542fc050, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:25:39,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/f610b617f8fc407a90c2529dd6ef7284 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/f610b617f8fc407a90c2529dd6ef7284 2024-11-20T22:25:39,401 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/f610b617f8fc407a90c2529dd6ef7284, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T22:25:39,403 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] 
regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 62dd6d22774f5784522279eafe291710 in 1116ms, sequenceid=38, compaction requested=false 2024-11-20T22:25:39,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:39,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-20T22:25:39,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-20T22:25:39,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T22:25:39,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0580 sec 2024-11-20T22:25:39,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.0640 sec 2024-11-20T22:25:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T22:25:39,449 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T22:25:39,452 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:39,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-20T22:25:39,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T22:25:39,454 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:39,454 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:39,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:39,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:25:39,483 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:39,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:39,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:39,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:39,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/0414b604141649ce978d02318bc1eb47 is 50, key is test_row_0/A:col10/1732141538451/Put/seqid=0 2024-11-20T22:25:39,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742310_1486 (size=14341) 2024-11-20T22:25:39,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T22:25:39,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141599643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141599644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141599651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141599653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
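The repeated RegionTooBusyException warnings around this point show the region server pushing back on writers because the region's memstore has crossed its 512 K blocking limit while the requested flush is still draining. A minimal client-side sketch of how a writer might back off and retry in this situation follows; it assumes the standard HBase client API (Connection, Table, Put) and reuses the row/family/qualifier names visible in this log purely for illustration. It is not part of the TestAcidGuarantees test code itself, and in practice the server-side RegionTooBusyException usually reaches the caller through the client's own retry machinery, possibly wrapped in a RetriesExhaustedException.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row and column names borrowed from the log ("test_row_0", family A, col10); value is hypothetical.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;                // hypothetical starting backoff
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                            // write accepted once the flush has drained the memstore
            } catch (IOException e) {
              // A server-side RegionTooBusyException ("Over memstore limit=512.0 K") typically
              // surfaces here, often after the client's internal retries are exhausted.
              if (attempt >= 5) {
                throw e;                        // give up after a few attempts in this sketch
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2;                   // simple exponential backoff
            }
          }
        }
      }
    }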
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141599654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T22:25:39,765 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:39,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:39,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141599766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141599766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141599769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141599772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
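The FlushTableProcedure/FlushRegionProcedure traffic in this stretch (pid=120 completed, pid=122/123 dispatched and re-dispatched while the region answers "NOT flushing ... as already flushing") is driven by client-requested table flushes, as the earlier "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and "Operation: FLUSH ... procId: 120 completed" entries indicate. A minimal sketch of that client-side call, assuming the standard Admin API and the table name from this log, is shown below; the flush(TableName) call returns once the master reports the procedure done, which is what the repeated "Checking to see if procedure is done" polling above corresponds to.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a flush procedure for the table and waits for it to finish,
          // mirroring the FlushTableProcedure entries in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }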
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141599772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:39,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:39,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:39,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:39,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/0414b604141649ce978d02318bc1eb47 2024-11-20T22:25:39,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/cc0fe3192985487f83df5c3976af3ab4 is 50, key is test_row_0/B:col10/1732141538451/Put/seqid=0 2024-11-20T22:25:39,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742311_1487 (size=12001) 2024-11-20T22:25:39,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141599975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141599977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141599977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141599979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:39,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
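The 512.0 K blocking limit quoted in these warnings is far below production defaults, so it is presumably a deliberately small test setting chosen to force frequent flushes and memstore pressure. To my understanding, the write-blocking threshold in stock HBase is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the snippet below sketches a hypothetical configuration that would yield the 512 K limit seen here. The values are illustrative assumptions, not taken from this test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values: 128 K flush size * multiplier 4 = 512 K blocking limit,
        // matching the "Over memstore limit=512.0 K" pushback seen in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }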
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141599980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T22:25:40,071 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:40,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:40,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,224 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:40,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:40,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141600280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141600283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141600283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141600284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141600287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/cc0fe3192985487f83df5c3976af3ab4 2024-11-20T22:25:40,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:40,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/8f77617ad4d6410ca8059c8e0332644a is 50, key is test_row_0/C:col10/1732141538451/Put/seqid=0 2024-11-20T22:25:40,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742312_1488 (size=12001) 2024-11-20T22:25:40,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:40,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T22:25:40,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:40,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:40,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141600788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/8f77617ad4d6410ca8059c8e0332644a 2024-11-20T22:25:40,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141600792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141600798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/0414b604141649ce978d02318bc1eb47 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0414b604141649ce978d02318bc1eb47 2024-11-20T22:25:40,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141600798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:40,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141600800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0414b604141649ce978d02318bc1eb47, entries=200, sequenceid=50, filesize=14.0 K 2024-11-20T22:25:40,842 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:40,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/cc0fe3192985487f83df5c3976af3ab4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/cc0fe3192985487f83df5c3976af3ab4 2024-11-20T22:25:40,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/cc0fe3192985487f83df5c3976af3ab4, entries=150, sequenceid=50, filesize=11.7 K 2024-11-20T22:25:40,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/8f77617ad4d6410ca8059c8e0332644a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8f77617ad4d6410ca8059c8e0332644a 2024-11-20T22:25:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:40,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8f77617ad4d6410ca8059c8e0332644a, entries=150, sequenceid=50, filesize=11.7 K 2024-11-20T22:25:40,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 62dd6d22774f5784522279eafe291710 in 1374ms, sequenceid=50, compaction requested=true 2024-11-20T22:25:40,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:40,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:40,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:40,857 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:40,857 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:40,858 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:40,858 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:40,859 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:40,859 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:40,859 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/47fbfd5811294bce8b25956c7308e3da, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/781f32b6ed934268a6e591bfeec1cd2f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0414b604141649ce978d02318bc1eb47] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=37.4 K 2024-11-20T22:25:40,860 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:40,860 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:40,860 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:40,860 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/452e1540fd4f437ca689915c3baaad5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/db7f6bc260084d86b830c159542fc050, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/cc0fe3192985487f83df5c3976af3ab4] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.2 K 2024-11-20T22:25:40,860 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47fbfd5811294bce8b25956c7308e3da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141537301 2024-11-20T22:25:40,861 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 452e1540fd4f437ca689915c3baaad5a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141537301 2024-11-20T22:25:40,861 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting db7f6bc260084d86b830c159542fc050, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141537319 2024-11-20T22:25:40,861 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 781f32b6ed934268a6e591bfeec1cd2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141537319 2024-11-20T22:25:40,861 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0414b604141649ce978d02318bc1eb47, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732141538436 2024-11-20T22:25:40,861 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting cc0fe3192985487f83df5c3976af3ab4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732141538451 2024-11-20T22:25:40,874 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:40,874 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/2fc533920e56481f9d4bfd07a24fbc97 is 50, key is test_row_0/B:col10/1732141538451/Put/seqid=0 2024-11-20T22:25:40,877 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:40,878 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/499eef5a7c1a46458c6aac9c1664920e is 50, key is test_row_0/A:col10/1732141538451/Put/seqid=0 2024-11-20T22:25:40,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742313_1489 (size=12104) 2024-11-20T22:25:40,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742314_1490 (size=12104) 2024-11-20T22:25:40,892 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/2fc533920e56481f9d4bfd07a24fbc97 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fc533920e56481f9d4bfd07a24fbc97 2024-11-20T22:25:40,901 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 2fc533920e56481f9d4bfd07a24fbc97(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:40,901 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:40,901 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141540857; duration=0sec 2024-11-20T22:25:40,901 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:40,901 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:40,901 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:40,903 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:40,903 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:40,903 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
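The HFileWriterImpl entries above describe cells keyed like test_row_0/A:col10 and test_row_0/C:col10, i.e. the same row written across the three column families A, B and C. As a hedged sketch of the kind of client write that produces such cells — only the table name, row key, families and qualifier come from the log; the connection setup and the value payload are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical client write shaped like the cells in the log
// (row "test_row_0", families A/B/C, qualifier "col10"); the payload is made up.
public class AcidWriteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            byte[] value = Bytes.toBytes("val");
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            table.put(put); // one multi-family mutation applied atomically to the row
        }
    }
}

Carrying all three families in a single Put keeps the row update atomic, which is the property the surrounding TestAcidGuarantees workload exercises.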
2024-11-20T22:25:40,903 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ed5e140268c541b9a5bdc1cab13b33bf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/f610b617f8fc407a90c2529dd6ef7284, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8f77617ad4d6410ca8059c8e0332644a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.2 K 2024-11-20T22:25:40,904 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ed5e140268c541b9a5bdc1cab13b33bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732141537301 2024-11-20T22:25:40,905 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f610b617f8fc407a90c2529dd6ef7284, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732141537319 2024-11-20T22:25:40,906 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f77617ad4d6410ca8059c8e0332644a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732141538451 2024-11-20T22:25:40,917 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#404 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:40,917 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/2ae6dd37897046608860d510e8c9a819 is 50, key is test_row_0/C:col10/1732141538451/Put/seqid=0 2024-11-20T22:25:40,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742315_1491 (size=12104) 2024-11-20T22:25:41,002 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T22:25:41,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:41,005 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:41,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:41,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:41,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:41,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:41,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:41,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/723cc376cd16487abedb2279e94a8c19 is 50, key is test_row_0/A:col10/1732141539643/Put/seqid=0 2024-11-20T22:25:41,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742316_1492 (size=12001) 2024-11-20T22:25:41,020 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/723cc376cd16487abedb2279e94a8c19 2024-11-20T22:25:41,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/e1f629b2b1a145938ace35a6562abf85 is 50, key is test_row_0/B:col10/1732141539643/Put/seqid=0 2024-11-20T22:25:41,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742317_1493 (size=12001) 2024-11-20T22:25:41,067 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/e1f629b2b1a145938ace35a6562abf85 2024-11-20T22:25:41,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/666320cca71c4683b856af7699b9e05b is 50, key is test_row_0/C:col10/1732141539643/Put/seqid=0 2024-11-20T22:25:41,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742318_1494 (size=12001) 2024-11-20T22:25:41,102 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/666320cca71c4683b856af7699b9e05b 2024-11-20T22:25:41,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/723cc376cd16487abedb2279e94a8c19 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/723cc376cd16487abedb2279e94a8c19 2024-11-20T22:25:41,112 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/723cc376cd16487abedb2279e94a8c19, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:25:41,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/e1f629b2b1a145938ace35a6562abf85 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/e1f629b2b1a145938ace35a6562abf85 2024-11-20T22:25:41,123 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/e1f629b2b1a145938ace35a6562abf85, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:25:41,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/666320cca71c4683b856af7699b9e05b as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/666320cca71c4683b856af7699b9e05b 2024-11-20T22:25:41,137 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/666320cca71c4683b856af7699b9e05b, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T22:25:41,138 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 62dd6d22774f5784522279eafe291710 in 133ms, sequenceid=75, compaction requested=false 2024-11-20T22:25:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:41,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-20T22:25:41,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-20T22:25:41,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T22:25:41,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6850 sec 2024-11-20T22:25:41,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.6890 sec 2024-11-20T22:25:41,306 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/499eef5a7c1a46458c6aac9c1664920e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/499eef5a7c1a46458c6aac9c1664920e 2024-11-20T22:25:41,312 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 499eef5a7c1a46458c6aac9c1664920e(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:41,312 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:41,312 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141540857; duration=0sec 2024-11-20T22:25:41,312 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:41,312 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:41,368 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/2ae6dd37897046608860d510e8c9a819 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/2ae6dd37897046608860d510e8c9a819 2024-11-20T22:25:41,375 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into 2ae6dd37897046608860d510e8c9a819(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:41,375 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:41,375 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141540858; duration=0sec 2024-11-20T22:25:41,375 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:41,375 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:41,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T22:25:41,561 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-20T22:25:41,562 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:41,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-20T22:25:41,563 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:41,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T22:25:41,564 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:41,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:41,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T22:25:41,719 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-20T22:25:41,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:41,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:41,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
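The FlushTableProcedure/FlushRegionProcedure pairs above (pid=122/123 and pid=124/125) were started by external flush requests, logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". A minimal sketch of issuing such a flush through the client API — the connection boilerplate is assumed; the table name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side call behind
// "Operation: FLUSH, Table Name: default:TestAcidGuarantees".
public class FlushSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // The master runs a FlushTableProcedure with a FlushRegionProcedure
            // per region, as logged above for pid=122/123 and pid=124/125.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The call waits for the procedure to finish, which is why the client keeps polling "Checking to see if procedure is done" until the "procId: 124 completed" line appears.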
2024-11-20T22:25:41,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-20T22:25:41,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-20T22:25:41,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T22:25:41,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 159 msec 2024-11-20T22:25:41,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 169 msec 2024-11-20T22:25:41,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:41,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:41,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f6150d0f415742fd9457f0ab8160e6a2 is 50, key is test_row_0/A:col10/1732141541798/Put/seqid=0 2024-11-20T22:25:41,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T22:25:41,865 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-20T22:25:41,867 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:41,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-20T22:25:41,868 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
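Immediately below, puts start failing with "RegionTooBusyException: Over memstore limit=512.0 K": once a region's in-memory size reaches its blocking limit (the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects new mutations until flushes catch up, and clients retry after a pause. The values in this sketch are illustrative for a deliberately small test region, not the ones this run actually used:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: settings behind "RegionTooBusyException: Over memstore limit".
// The exact values configured by the test are not shown in this log.
public class MemstoreBackpressureSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size (bytes, per region) at which a flush is requested.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);   // example: 128 K
        // Writes are blocked once the memstore reaches flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);        // 128 K * 4 = 512 K
        // Client-side retry behaviour when the region reports it is too busy.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);                          // ms between retries
        return conf;
    }
}

The repeated WARN entries and stack traces that follow are this back-pressure being applied to the concurrent writers while the MemStoreFlusher drains the region; as far as these logs show, the rejected mutations are retried by the clients rather than lost.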
2024-11-20T22:25:41,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T22:25:41,869 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:41,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:41,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141601875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742319_1495 (size=14341) 2024-11-20T22:25:41,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141601876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141601881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141601882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141601883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f6150d0f415742fd9457f0ab8160e6a2 2024-11-20T22:25:41,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/bde84f6da7504698bfec16e8d72914a8 is 50, key is test_row_0/B:col10/1732141541798/Put/seqid=0 2024-11-20T22:25:41,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T22:25:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141601984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141601989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141601994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141601994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:41,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141601994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742320_1496 (size=12001) 2024-11-20T22:25:42,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T22:25:42,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:42,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T22:25:42,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T22:25:42,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:42,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141602197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141602198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141602199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141602199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141602200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,327 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T22:25:42,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:42,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:42,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/bde84f6da7504698bfec16e8d72914a8 2024-11-20T22:25:42,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/cc8c0a0e66194e239e9d144bd3da5576 is 50, key is test_row_0/C:col10/1732141541798/Put/seqid=0 2024-11-20T22:25:42,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742321_1497 (size=12001) 2024-11-20T22:25:42,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/cc8c0a0e66194e239e9d144bd3da5576 2024-11-20T22:25:42,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f6150d0f415742fd9457f0ab8160e6a2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f6150d0f415742fd9457f0ab8160e6a2 2024-11-20T22:25:42,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f6150d0f415742fd9457f0ab8160e6a2, entries=200, sequenceid=90, filesize=14.0 K 2024-11-20T22:25:42,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/bde84f6da7504698bfec16e8d72914a8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/bde84f6da7504698bfec16e8d72914a8 2024-11-20T22:25:42,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/bde84f6da7504698bfec16e8d72914a8, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:25:42,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/cc8c0a0e66194e239e9d144bd3da5576 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/cc8c0a0e66194e239e9d144bd3da5576 2024-11-20T22:25:42,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/cc8c0a0e66194e239e9d144bd3da5576, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T22:25:42,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 62dd6d22774f5784522279eafe291710 in 637ms, sequenceid=90, compaction requested=true 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:42,469 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:42,469 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:42,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T22:25:42,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:42,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:42,472 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,472 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/499eef5a7c1a46458c6aac9c1664920e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/723cc376cd16487abedb2279e94a8c19, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f6150d0f415742fd9457f0ab8160e6a2] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=37.5 K 2024-11-20T22:25:42,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:42,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:42,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
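The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above are thrown by HRegion.checkResources when the region's memstore has grown past its blocking threshold while a flush is still running (hence the "NOT flushing ... as already flushing" entries). That threshold is the configured region flush size multiplied by the block multiplier. A minimal configuration sketch of that arithmetic; the concrete values below are illustrative assumptions, not the settings actually used by this test run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Flush a region's memstore once it reaches this many bytes (assumed value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);

        // Block new writes once the memstore reaches flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // With the assumed values: 128 KB * 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" figure reported in the warnings above.
        System.out.println("Writes block above " + blockingLimit + " bytes");
    }
}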
2024-11-20T22:25:42,472 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 499eef5a7c1a46458c6aac9c1664920e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732141538451 2024-11-20T22:25:42,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fc533920e56481f9d4bfd07a24fbc97, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/e1f629b2b1a145938ace35a6562abf85, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/bde84f6da7504698bfec16e8d72914a8] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.3 K 2024-11-20T22:25:42,472 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 723cc376cd16487abedb2279e94a8c19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141539643 2024-11-20T22:25:42,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fc533920e56481f9d4bfd07a24fbc97, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732141538451 2024-11-20T22:25:42,473 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6150d0f415742fd9457f0ab8160e6a2, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141541798 2024-11-20T22:25:42,473 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e1f629b2b1a145938ace35a6562abf85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141539643 2024-11-20T22:25:42,473 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bde84f6da7504698bfec16e8d72914a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141541798 2024-11-20T22:25:42,482 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:42,482 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/0316cb89ac24407fb325fbc70eb42331 is 50, key is test_row_0/B:col10/1732141541798/Put/seqid=0 2024-11-20T22:25:42,485 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#412 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:42,485 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/5705ffdcbbb9439892531a7ef3e0ac56 is 50, key is test_row_0/A:col10/1732141541798/Put/seqid=0 2024-11-20T22:25:42,493 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T22:25:42,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742323_1499 (size=12207) 2024-11-20T22:25:42,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:42,493 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:25:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:42,499 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/5705ffdcbbb9439892531a7ef3e0ac56 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/5705ffdcbbb9439892531a7ef3e0ac56 2024-11-20T22:25:42,505 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 
5705ffdcbbb9439892531a7ef3e0ac56(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:42,505 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:42,505 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141542469; duration=0sec 2024-11-20T22:25:42,505 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:42,505 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:42,505 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:42,507 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:42,507 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:42,507 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
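The CompactSplit, SortedCompactionPolicy, and ExploringCompactionPolicy entries above show the region server selecting three HFiles per store (A, B, C) for a minor compaction immediately after the flush completes. For comparison, a flush and a major compaction can also be requested explicitly through the Admin API; a minimal sketch, assuming the table name from this test and default connection settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the region servers to flush every region of the table to HFiles.
            admin.flush(table);

            // Request a major compaction, rewriting each store's HFiles into one file.
            admin.majorCompact(table);
        }
    }
}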
2024-11-20T22:25:42,507 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/2ae6dd37897046608860d510e8c9a819, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/666320cca71c4683b856af7699b9e05b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/cc8c0a0e66194e239e9d144bd3da5576] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.3 K 2024-11-20T22:25:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:42,508 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ae6dd37897046608860d510e8c9a819, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732141538451 2024-11-20T22:25:42,508 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 666320cca71c4683b856af7699b9e05b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732141539643 2024-11-20T22:25:42,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:42,508 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc8c0a0e66194e239e9d144bd3da5576, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141541798 2024-11-20T22:25:42,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141602512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141602512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ff5b1e2bf01f4a5a8bde311dcf99cee9 is 50, key is test_row_0/A:col10/1732141541882/Put/seqid=0 2024-11-20T22:25:42,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742322_1498 (size=12207) 2024-11-20T22:25:42,517 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:42,518 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/631372ddd34b4ccf910c321aac95c3d9 is 50, key is test_row_0/C:col10/1732141541798/Put/seqid=0 2024-11-20T22:25:42,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141602513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141602513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141602514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,521 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/0316cb89ac24407fb325fbc70eb42331 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0316cb89ac24407fb325fbc70eb42331 2024-11-20T22:25:42,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742324_1500 (size=12001) 2024-11-20T22:25:42,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742325_1501 (size=12207) 2024-11-20T22:25:42,525 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 0316cb89ac24407fb325fbc70eb42331(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
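The RegionTooBusyException entries that resume below are surfaced to writers as retryable IOExceptions, and the standard HBase client normally pauses and retries them rather than failing the put outright. A minimal client-side sketch against the same table; the retry count and pause are illustrative assumptions, not the values this test configures:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // How many times the client retries a failed operation (assumed value).
        conf.setInt("hbase.client.retries.number", 10);
        // Base pause in milliseconds between retries (assumed value).
        conf.setLong("hbase.client.pause", 200);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // put() blocks until it succeeds or the retry budget is exhausted,
            // backing off while the region keeps reporting RegionTooBusyException.
            table.put(put);
        }
    }
}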
2024-11-20T22:25:42,525 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:42,525 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141542469; duration=0sec 2024-11-20T22:25:42,526 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:42,526 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:42,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141602615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141602615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141602620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141602620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141602621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141602822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141602823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141602823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141602823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141602824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:42,924 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ff5b1e2bf01f4a5a8bde311dcf99cee9 2024-11-20T22:25:42,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/047f4bb8e0c9415882e66830ab2a3351 is 50, key is test_row_0/B:col10/1732141541882/Put/seqid=0 2024-11-20T22:25:42,931 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/631372ddd34b4ccf910c321aac95c3d9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/631372ddd34b4ccf910c321aac95c3d9 2024-11-20T22:25:42,935 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into 631372ddd34b4ccf910c321aac95c3d9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
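The repeated RegionTooBusyException entries above come from HRegion.checkResources, which rejects writes once a region's memstore passes its blocking limit; that limit is the configured memstore flush size multiplied by the blocking multiplier, and the "Over memstore limit=512.0 K" figure here suggests this test runs with a far smaller flush size than the 128 MB production default. A minimal sketch of that arithmetic is below; the two configuration keys are the standard HBase ones, while the class name, defaults and printed message are only illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the blocking-limit arithmetic behind "Over memstore limit=512.0 K".
// Writes to a region fail with RegionTooBusyException once the region's memstore
// exceeds flushSize * blockMultiplier, and stay blocked until a flush (like the
// one completing further below) brings the memstore back under the limit.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("Writes block above ~" + (flushSize * blockMultiplier) + " bytes per region");
  }
}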
2024-11-20T22:25:42,935 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:42,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742326_1502 (size=12001) 2024-11-20T22:25:42,935 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141542469; duration=0sec 2024-11-20T22:25:42,935 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:42,935 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:42,935 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/047f4bb8e0c9415882e66830ab2a3351 2024-11-20T22:25:42,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/a445ea0a7e1d4de59dc1f88a1d2e4244 is 50, key is test_row_0/C:col10/1732141541882/Put/seqid=0 2024-11-20T22:25:42,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742327_1503 (size=12001) 2024-11-20T22:25:42,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T22:25:43,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141603127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141603128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141603128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141603129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141603130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,346 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/a445ea0a7e1d4de59dc1f88a1d2e4244 2024-11-20T22:25:43,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ff5b1e2bf01f4a5a8bde311dcf99cee9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ff5b1e2bf01f4a5a8bde311dcf99cee9 2024-11-20T22:25:43,353 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ff5b1e2bf01f4a5a8bde311dcf99cee9, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T22:25:43,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/047f4bb8e0c9415882e66830ab2a3351 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/047f4bb8e0c9415882e66830ab2a3351 2024-11-20T22:25:43,357 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/047f4bb8e0c9415882e66830ab2a3351, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T22:25:43,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/a445ea0a7e1d4de59dc1f88a1d2e4244 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/a445ea0a7e1d4de59dc1f88a1d2e4244 2024-11-20T22:25:43,361 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/a445ea0a7e1d4de59dc1f88a1d2e4244, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T22:25:43,362 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 62dd6d22774f5784522279eafe291710 in 869ms, sequenceid=114, compaction requested=false 2024-11-20T22:25:43,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:43,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
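The flush finishing here follows the pattern visible in the surrounding entries: the master drives a FlushTableProcedure, the region server's FlushRegionCallable flushes each store to a .tmp file, and HRegionFileSystem then commits those files into the A, B and C store directories. The same kind of flush can be requested from a client through the Admin API; a minimal sketch, assuming a reachable cluster and using the table name taken from the log (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of requesting the kind of table flush that appears above as
// FlushTableProcedure/FlushRegionProcedure. Assumes a running cluster reachable
// through the default client configuration.
public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // blocks until the flush procedure completes
    }
  }
}

Admin.flush waits for the master-side procedure to finish, which matches the client's "Checking to see if procedure is done pid=..." polling and the "Operation: FLUSH ... completed" entries visible in this log.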
2024-11-20T22:25:43,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-20T22:25:43,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-20T22:25:43,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T22:25:43,365 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4940 sec 2024-11-20T22:25:43,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.4980 sec 2024-11-20T22:25:43,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:43,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:43,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:43,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:43,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:43,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:43,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:43,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:43,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/83da1e3c373648caad7c769560c04224 is 50, key is test_row_0/A:col10/1732141543636/Put/seqid=0 2024-11-20T22:25:43,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742328_1504 (size=12101) 2024-11-20T22:25:43,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141603662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141603663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141603668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141603668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141603668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141603769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141603771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141603774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141603777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141603777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T22:25:43,972 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-20T22:25:43,973 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:43,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-20T22:25:43,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:43,974 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:43,974 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:43,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:43,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141603974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141603976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141603979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141603982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:43,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141603983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/83da1e3c373648caad7c769560c04224 2024-11-20T22:25:44,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/22a793a44a9a461e948dd1ffc4480627 is 50, key is test_row_0/B:col10/1732141543636/Put/seqid=0 2024-11-20T22:25:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:44,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742329_1505 (size=12101) 2024-11-20T22:25:44,126 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:44,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:44,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:44,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:44,279 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141604279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:44,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:44,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:44,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141604280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141604287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141604288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141604288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:44,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:44,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:44,488 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/47fbfd5811294bce8b25956c7308e3da, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/781f32b6ed934268a6e591bfeec1cd2f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0414b604141649ce978d02318bc1eb47, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/499eef5a7c1a46458c6aac9c1664920e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/723cc376cd16487abedb2279e94a8c19, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f6150d0f415742fd9457f0ab8160e6a2] to archive 2024-11-20T22:25:44,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/22a793a44a9a461e948dd1ffc4480627 2024-11-20T22:25:44,489 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:25:44,492 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/47fbfd5811294bce8b25956c7308e3da to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/47fbfd5811294bce8b25956c7308e3da 2024-11-20T22:25:44,494 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/781f32b6ed934268a6e591bfeec1cd2f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/781f32b6ed934268a6e591bfeec1cd2f 2024-11-20T22:25:44,495 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0414b604141649ce978d02318bc1eb47 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0414b604141649ce978d02318bc1eb47 2024-11-20T22:25:44,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/df3f9ef147214f3ba5810eaec1faa2a8 is 50, key is test_row_0/C:col10/1732141543636/Put/seqid=0 2024-11-20T22:25:44,503 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/499eef5a7c1a46458c6aac9c1664920e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/499eef5a7c1a46458c6aac9c1664920e 2024-11-20T22:25:44,504 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/723cc376cd16487abedb2279e94a8c19 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/723cc376cd16487abedb2279e94a8c19 2024-11-20T22:25:44,505 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f6150d0f415742fd9457f0ab8160e6a2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f6150d0f415742fd9457f0ab8160e6a2 2024-11-20T22:25:44,506 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/452e1540fd4f437ca689915c3baaad5a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/db7f6bc260084d86b830c159542fc050, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fc533920e56481f9d4bfd07a24fbc97, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/cc0fe3192985487f83df5c3976af3ab4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/e1f629b2b1a145938ace35a6562abf85, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/bde84f6da7504698bfec16e8d72914a8] to archive 2024-11-20T22:25:44,506 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:25:44,507 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/452e1540fd4f437ca689915c3baaad5a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/452e1540fd4f437ca689915c3baaad5a 2024-11-20T22:25:44,508 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/db7f6bc260084d86b830c159542fc050 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/db7f6bc260084d86b830c159542fc050 2024-11-20T22:25:44,509 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fc533920e56481f9d4bfd07a24fbc97 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fc533920e56481f9d4bfd07a24fbc97 2024-11-20T22:25:44,510 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/cc0fe3192985487f83df5c3976af3ab4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/cc0fe3192985487f83df5c3976af3ab4 2024-11-20T22:25:44,511 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/e1f629b2b1a145938ace35a6562abf85 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/e1f629b2b1a145938ace35a6562abf85 2024-11-20T22:25:44,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742330_1506 (size=12101) 2024-11-20T22:25:44,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/df3f9ef147214f3ba5810eaec1faa2a8 2024-11-20T22:25:44,512 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): 
Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/bde84f6da7504698bfec16e8d72914a8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/bde84f6da7504698bfec16e8d72914a8 2024-11-20T22:25:44,514 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ed5e140268c541b9a5bdc1cab13b33bf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/f610b617f8fc407a90c2529dd6ef7284, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/2ae6dd37897046608860d510e8c9a819, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8f77617ad4d6410ca8059c8e0332644a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/666320cca71c4683b856af7699b9e05b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/cc8c0a0e66194e239e9d144bd3da5576] to archive 2024-11-20T22:25:44,515 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:25:44,517 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ed5e140268c541b9a5bdc1cab13b33bf to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ed5e140268c541b9a5bdc1cab13b33bf 2024-11-20T22:25:44,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/83da1e3c373648caad7c769560c04224 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/83da1e3c373648caad7c769560c04224 2024-11-20T22:25:44,518 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/f610b617f8fc407a90c2529dd6ef7284 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/f610b617f8fc407a90c2529dd6ef7284 2024-11-20T22:25:44,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/83da1e3c373648caad7c769560c04224, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T22:25:44,521 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/2ae6dd37897046608860d510e8c9a819 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/2ae6dd37897046608860d510e8c9a819 2024-11-20T22:25:44,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/22a793a44a9a461e948dd1ffc4480627 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/22a793a44a9a461e948dd1ffc4480627 2024-11-20T22:25:44,522 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8f77617ad4d6410ca8059c8e0332644a to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8f77617ad4d6410ca8059c8e0332644a 2024-11-20T22:25:44,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/22a793a44a9a461e948dd1ffc4480627, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T22:25:44,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/df3f9ef147214f3ba5810eaec1faa2a8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/df3f9ef147214f3ba5810eaec1faa2a8 2024-11-20T22:25:44,534 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/666320cca71c4683b856af7699b9e05b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/666320cca71c4683b856af7699b9e05b 2024-11-20T22:25:44,538 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6365a1e51efd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/cc8c0a0e66194e239e9d144bd3da5576 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/cc8c0a0e66194e239e9d144bd3da5576 2024-11-20T22:25:44,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/df3f9ef147214f3ba5810eaec1faa2a8, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T22:25:44,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 62dd6d22774f5784522279eafe291710 in 906ms, sequenceid=131, compaction requested=true 2024-11-20T22:25:44,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:44,543 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:44,544 DEBUG 
[RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:44,544 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:44,544 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:44,544 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:44,544 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/5705ffdcbbb9439892531a7ef3e0ac56, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ff5b1e2bf01f4a5a8bde311dcf99cee9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/83da1e3c373648caad7c769560c04224] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.5 K 2024-11-20T22:25:44,545 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5705ffdcbbb9439892531a7ef3e0ac56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141541798 2024-11-20T22:25:44,545 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff5b1e2bf01f4a5a8bde311dcf99cee9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732141541874 2024-11-20T22:25:44,545 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:44,545 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 
2024-11-20T22:25:44,545 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,545 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0316cb89ac24407fb325fbc70eb42331, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/047f4bb8e0c9415882e66830ab2a3351, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/22a793a44a9a461e948dd1ffc4480627] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.5 K 2024-11-20T22:25:44,545 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83da1e3c373648caad7c769560c04224, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141542513 2024-11-20T22:25:44,545 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0316cb89ac24407fb325fbc70eb42331, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141541798 2024-11-20T22:25:44,546 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 047f4bb8e0c9415882e66830ab2a3351, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732141541874 2024-11-20T22:25:44,546 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a793a44a9a461e948dd1ffc4480627, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141542513 2024-11-20T22:25:44,555 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:44,555 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/13e4fb36b2ad49248111c1f9dffd844a is 50, key is test_row_0/A:col10/1732141543636/Put/seqid=0 2024-11-20T22:25:44,558 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:44,559 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/43d957b278494428a466526bb2dd14d2 is 50, key is test_row_0/B:col10/1732141543636/Put/seqid=0 2024-11-20T22:25:44,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742331_1507 (size=12204) 2024-11-20T22:25:44,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:44,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742332_1508 (size=12204) 2024-11-20T22:25:44,587 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/43d957b278494428a466526bb2dd14d2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/43d957b278494428a466526bb2dd14d2 2024-11-20T22:25:44,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T22:25:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:44,592 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T22:25:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:44,592 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 43d957b278494428a466526bb2dd14d2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:44,592 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710:
2024-11-20T22:25:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B
2024-11-20T22:25:44,592 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141544544; duration=0sec
2024-11-20T22:25:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C
2024-11-20T22:25:44,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T22:25:44,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:25:44,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B
2024-11-20T22:25:44,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:44,593 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:44,594 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files)
2024-11-20T22:25:44,594 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.
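
Note: the selection logged just above (SortedCompactionPolicy choosing among 3 eligible files with 16 blocking, ExploringCompactionPolicy picking 3 files totalling 36309 bytes) is governed by a handful of store-level settings. A hedged sketch of those knobs follows; the values are illustrative, not necessarily what this test run configures, and they normally live in hbase-site.xml on the region servers rather than in client code.

// Illustrative sketch only: names the configuration keys behind the selection above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // The "16 blocking" in the log matches this limit: once a store holds this
    // many files, further flushes are held back until compaction catches up.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // The throttle lines ("total limit is 50.00 MB/second") come from the
    // pressure-aware throughput controller, whose bounds (bytes/sec) look like:
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    // Setting these on a client Configuration is shown only to name the keys.
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
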
2024-11-20T22:25:44,594 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/631372ddd34b4ccf910c321aac95c3d9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/a445ea0a7e1d4de59dc1f88a1d2e4244, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/df3f9ef147214f3ba5810eaec1faa2a8] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.5 K 2024-11-20T22:25:44,595 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 631372ddd34b4ccf910c321aac95c3d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732141541798 2024-11-20T22:25:44,596 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a445ea0a7e1d4de59dc1f88a1d2e4244, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732141541874 2024-11-20T22:25:44,596 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting df3f9ef147214f3ba5810eaec1faa2a8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141542513 2024-11-20T22:25:44,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/99c61628aff34e9a8bbcdaa020e64dc3 is 50, key is test_row_0/A:col10/1732141543667/Put/seqid=0 2024-11-20T22:25:44,606 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:44,606 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/ea776bf7842a4d13b193f7d9998c7c93 is 50, key is test_row_0/C:col10/1732141543636/Put/seqid=0 2024-11-20T22:25:44,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742333_1509 (size=12151) 2024-11-20T22:25:44,608 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/99c61628aff34e9a8bbcdaa020e64dc3 2024-11-20T22:25:44,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/208b217dc54845d3aa1f33354cf3a41f is 50, key is test_row_0/B:col10/1732141543667/Put/seqid=0 2024-11-20T22:25:44,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742334_1510 (size=12204) 2024-11-20T22:25:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742335_1511 (size=12151) 2024-11-20T22:25:44,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:44,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:44,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141604801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141604807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141604808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141604809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141604809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141604910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141604917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141604918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141604919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141604919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:44,989 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/13e4fb36b2ad49248111c1f9dffd844a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/13e4fb36b2ad49248111c1f9dffd844a 2024-11-20T22:25:44,995 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 13e4fb36b2ad49248111c1f9dffd844a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
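
Note: the repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (reported here as 512.0 K); the client is expected to back off and retry until the in-flight flush drains the memstore. Below is a sketch of a writer that bounds those retries; the quorum, retry counts, and row/value contents are assumptions, not taken from the test.

// Illustrative sketch only: a writer with bounded client-side retries.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BoundedRetryWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // assumption
    conf.setInt("hbase.client.retries.number", 5);   // fewer retries than the default
    conf.setLong("hbase.client.pause", 200);         // retry backoff base, in ms
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // If the bounded retries run out (e.g. the region keeps answering
        // RegionTooBusyException), the failure surfaces as an IOException
        // subclass such as RetriesExhaustedException.
        System.err.println("write gave up after retries: " + e.getMessage());
      }
    }
  }
}

With default settings the client absorbs bursts like the one in this log silently; tightening the retry budget, as sketched, only changes how quickly the failure becomes visible to the caller.
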
2024-11-20T22:25:44,995 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:44,995 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141544543; duration=0sec 2024-11-20T22:25:44,995 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:44,995 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:45,025 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/208b217dc54845d3aa1f33354cf3a41f 2024-11-20T22:25:45,028 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/ea776bf7842a4d13b193f7d9998c7c93 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ea776bf7842a4d13b193f7d9998c7c93 2024-11-20T22:25:45,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/caec78dfda934d00b6aa9db96ded73a8 is 50, key is test_row_0/C:col10/1732141543667/Put/seqid=0 2024-11-20T22:25:45,033 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into ea776bf7842a4d13b193f7d9998c7c93(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
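
Note: the store files being flushed and compacted here consistently report bloomtype=ROW and bloomFilter=true for families A, B and C. For illustration only, a table definition consistent with that is sketched below; any detail not present in the log (versions, splits, compression) is an assumption.

// Illustrative sketch only: three column families with ROW bloom filters.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableWithRowBlooms {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setBloomFilterType(BloomType.ROW) // matches bloomtype=ROW in the log
                .build());
      }
      admin.createTable(table.build());
    }
  }
}
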
2024-11-20T22:25:45,033 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:45,033 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141544544; duration=0sec 2024-11-20T22:25:45,033 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:45,033 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:45,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742336_1512 (size=12151) 2024-11-20T22:25:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:45,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141605113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141605124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141605124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141605124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141605126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141605419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141605430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141605431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141605431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141605433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,452 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/caec78dfda934d00b6aa9db96ded73a8 2024-11-20T22:25:45,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/99c61628aff34e9a8bbcdaa020e64dc3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/99c61628aff34e9a8bbcdaa020e64dc3 2024-11-20T22:25:45,468 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/99c61628aff34e9a8bbcdaa020e64dc3, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T22:25:45,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/208b217dc54845d3aa1f33354cf3a41f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/208b217dc54845d3aa1f33354cf3a41f 2024-11-20T22:25:45,477 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/208b217dc54845d3aa1f33354cf3a41f, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T22:25:45,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/caec78dfda934d00b6aa9db96ded73a8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/caec78dfda934d00b6aa9db96ded73a8 2024-11-20T22:25:45,486 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/caec78dfda934d00b6aa9db96ded73a8, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T22:25:45,488 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 62dd6d22774f5784522279eafe291710 in 896ms, sequenceid=154, compaction requested=false 2024-11-20T22:25:45,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:45,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:45,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-20T22:25:45,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-20T22:25:45,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T22:25:45,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5180 sec 2024-11-20T22:25:45,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.5220 sec 2024-11-20T22:25:45,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:45,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T22:25:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:45,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/7e117f6938b44296a2d336a7bb39cfb9 is 50, key is test_row_0/A:col10/1732141545929/Put/seqid=0 2024-11-20T22:25:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742337_1513 (size=12151) 2024-11-20T22:25:45,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/7e117f6938b44296a2d336a7bb39cfb9 2024-11-20T22:25:45,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/9394f1b4febb4ab18c9bfdfa9457547f is 50, key is test_row_0/B:col10/1732141545929/Put/seqid=0 2024-11-20T22:25:45,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742338_1514 (size=12151) 2024-11-20T22:25:45,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141605974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141605974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141605975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141605975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:45,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141605986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T22:25:46,078 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T22:25:46,081 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:46,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T22:25:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:46,083 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:46,084 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:46,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:46,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141606088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141606088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141606088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141606094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141606097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:46,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:46,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:46,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:46,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:46,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
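The repeated RegionTooBusyException entries above are the region server pushing back on writers: the region's memstore has reached its blocking limit (512.0 K in this run) while the flush that would drain it is still in flight, so incoming Mutate calls are rejected until the flush completes, and the re-dispatched flush procedure itself reports "NOT flushing ... as already flushing". A minimal, hypothetical sketch of how a caller might back off and retry on that exception is below; note that with the stock client and default retry settings the exception is usually retried internally and may only surface wrapped in a retries-exhausted error, so treat this as an illustration of the failure mode rather than required application code. The class and method names are invented for the example; Table, Put and RegionTooBusyException are the real HBase client types seen in the stack traces above.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPut {
  /** Hypothetical helper: retry a single put with exponential backoff when the
   *  region reports it is over its blocking memstore limit. */
  public static void putWithBackoff(Table table, Put put)
      throws IOException, InterruptedException {
    long backoffMs = 100L;                      // assumed starting backoff, not taken from the test
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);                         // same single-row Mutate path seen in the log
        return;
      } catch (RegionTooBusyException e) {      // region over its memstore limit, as in the WARNs above
        Thread.sleep(backoffMs);                // give the in-flight flush time to drain the memstore
        backoffMs *= 2;
      }
    }
    throw new IOException("region still too busy after retries");
  }
}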
2024-11-20T22:25:46,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:46,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141606295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141606296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141606299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141606312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141606312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/9394f1b4febb4ab18c9bfdfa9457547f 2024-11-20T22:25:46,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:46,397 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:46,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:46,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:46,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:46,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:46,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:46,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/5485f584570e404e89525d3243a397e1 is 50, key is test_row_0/C:col10/1732141545929/Put/seqid=0 2024-11-20T22:25:46,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:46,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742339_1515 (size=12151) 2024-11-20T22:25:46,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/5485f584570e404e89525d3243a397e1 2024-11-20T22:25:46,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/7e117f6938b44296a2d336a7bb39cfb9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/7e117f6938b44296a2d336a7bb39cfb9 2024-11-20T22:25:46,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/7e117f6938b44296a2d336a7bb39cfb9, entries=150, sequenceid=171, filesize=11.9 K 2024-11-20T22:25:46,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/9394f1b4febb4ab18c9bfdfa9457547f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9394f1b4febb4ab18c9bfdfa9457547f 2024-11-20T22:25:46,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9394f1b4febb4ab18c9bfdfa9457547f, entries=150, sequenceid=171, filesize=11.9 K 2024-11-20T22:25:46,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/5485f584570e404e89525d3243a397e1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5485f584570e404e89525d3243a397e1 2024-11-20T22:25:46,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5485f584570e404e89525d3243a397e1, entries=150, sequenceid=171, filesize=11.9 K 2024-11-20T22:25:46,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 62dd6d22774f5784522279eafe291710 in 539ms, sequenceid=171, compaction requested=true 2024-11-20T22:25:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:46,470 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:46,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:46,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:46,470 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:46,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:46,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:46,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:46,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:46,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36506 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:46,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor 
compaction (all files) 2024-11-20T22:25:46,471 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:46,471 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/13e4fb36b2ad49248111c1f9dffd844a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/99c61628aff34e9a8bbcdaa020e64dc3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/7e117f6938b44296a2d336a7bb39cfb9] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.7 K 2024-11-20T22:25:46,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13e4fb36b2ad49248111c1f9dffd844a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141542513 2024-11-20T22:25:46,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36506 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:46,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:46,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
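The selection lines above come from ExploringCompactionPolicy: three eligible store files per column family, none currently compacting, against a blocking threshold of 16 files, so a minor compaction of all three files is queued for each of A, B and C. The knobs behind that decision are ordinary configuration properties; the sketch below uses illustrative values (the shipped defaults), not the ones this test run actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuning {
  /** Minimal sketch of the standard knobs behind the selection logged above. */
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // need at least 3 eligible files for a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files merged in one pass
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection lines
    return conf;
  }
}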
2024-11-20T22:25:46,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/43d957b278494428a466526bb2dd14d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/208b217dc54845d3aa1f33354cf3a41f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9394f1b4febb4ab18c9bfdfa9457547f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.7 K 2024-11-20T22:25:46,472 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99c61628aff34e9a8bbcdaa020e64dc3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732141543662 2024-11-20T22:25:46,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 43d957b278494428a466526bb2dd14d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141542513 2024-11-20T22:25:46,473 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 208b217dc54845d3aa1f33354cf3a41f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732141543662 2024-11-20T22:25:46,473 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e117f6938b44296a2d336a7bb39cfb9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141544805 2024-11-20T22:25:46,473 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9394f1b4febb4ab18c9bfdfa9457547f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141544805 2024-11-20T22:25:46,493 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:46,494 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3fb55e98ef5143fa8d7a53e44f6c6361 is 50, key is test_row_0/A:col10/1732141545929/Put/seqid=0 2024-11-20T22:25:46,501 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#430 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:46,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/0a39da65585441069c6010402b12c78b is 50, key is test_row_0/B:col10/1732141545929/Put/seqid=0 2024-11-20T22:25:46,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742341_1517 (size=12357) 2024-11-20T22:25:46,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742340_1516 (size=12357) 2024-11-20T22:25:46,548 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/0a39da65585441069c6010402b12c78b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0a39da65585441069c6010402b12c78b 2024-11-20T22:25:46,554 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 0a39da65585441069c6010402b12c78b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:46,554 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:46,554 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141546470; duration=0sec 2024-11-20T22:25:46,554 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:46,554 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:46,554 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:46,554 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T22:25:46,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
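The FlushRegionCallable being re-executed here for pid=131 is the region-level child of the FlushTableProcedure (pid=130) that the test driver started through the admin flush API, as recorded earlier by HBaseAdmin$TableFuture "Operation: FLUSH". A minimal sketch of that call path is shown below, assuming a ConnectionFactory-created connection configured from hbase-site.xml and using the table name from this test; Admin.flush(TableName) is the standard client entry point that the master turns into these procedures.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTable {
  public static void main(String[] args) throws Exception {
    // Connection settings come from hbase-site.xml on the classpath.
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; this becomes a
      // FlushTableProcedure with one FlushRegionProcedure per region, as for pid=130/131 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}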
2024-11-20T22:25:46,555 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:25:46,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:46,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:46,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:46,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:46,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:46,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:46,557 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36506 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:46,557 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:46,557 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:46,557 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ea776bf7842a4d13b193f7d9998c7c93, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/caec78dfda934d00b6aa9db96ded73a8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5485f584570e404e89525d3243a397e1] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.7 K 2024-11-20T22:25:46,558 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3fb55e98ef5143fa8d7a53e44f6c6361 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3fb55e98ef5143fa8d7a53e44f6c6361 2024-11-20T22:25:46,558 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ea776bf7842a4d13b193f7d9998c7c93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732141542513 2024-11-20T22:25:46,558 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting caec78dfda934d00b6aa9db96ded73a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732141543662 2024-11-20T22:25:46,559 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5485f584570e404e89525d3243a397e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141544805 2024-11-20T22:25:46,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/a5e25116cc504e98ac2316ada000c7d9 is 50, key is test_row_0/A:col10/1732141545983/Put/seqid=0 2024-11-20T22:25:46,568 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 3fb55e98ef5143fa8d7a53e44f6c6361(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:46,568 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:46,568 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141546470; duration=0sec 2024-11-20T22:25:46,568 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:46,568 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:46,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742342_1518 (size=12151) 2024-11-20T22:25:46,580 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/a5e25116cc504e98ac2316ada000c7d9 2024-11-20T22:25:46,588 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:46,588 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/8a79fa88e7a947e8ac9326bb361c9b23 is 50, key is test_row_0/C:col10/1732141545929/Put/seqid=0 2024-11-20T22:25:46,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6096e5ef1c8a45b9a2675014734037c6 is 50, key is test_row_0/B:col10/1732141545983/Put/seqid=0 2024-11-20T22:25:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:46,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:46,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742343_1519 (size=12357) 2024-11-20T22:25:46,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141606626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141606627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141606629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141606631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141606627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,642 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/8a79fa88e7a947e8ac9326bb361c9b23 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8a79fa88e7a947e8ac9326bb361c9b23 2024-11-20T22:25:46,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742344_1520 (size=12151) 2024-11-20T22:25:46,643 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6096e5ef1c8a45b9a2675014734037c6 2024-11-20T22:25:46,649 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into 8a79fa88e7a947e8ac9326bb361c9b23(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
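The repeated RegionTooBusyException warnings are the region server rejecting writes while the region's memstore is above its blocking size and a flush is still in progress. The stock HBase client normally retries these internally (and, depending on retry settings, may surface them wrapped in a retries-exhausted exception instead), so explicit handling is rarely needed; purely as an illustration, here is a minimal backoff sketch, with the column family/qualifier taken from the row keys in the log and a made-up retry policy:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionBackoffSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Family "A" and qualifier "col10" mirror the keys seen in the log; the value is arbitrary.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy"));
          long backoffMs = 100;                       // assumed starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break;                                  // write accepted
            } catch (RegionTooBusyException e) {
              // Memstore is above its blocking size; give the in-flight flush time to finish.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }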
2024-11-20T22:25:46,649 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:46,649 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141546470; duration=0sec 2024-11-20T22:25:46,649 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:46,649 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:46,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/35a27dbf25b74fff8fdec5b919157735 is 50, key is test_row_0/C:col10/1732141545983/Put/seqid=0 2024-11-20T22:25:46,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742345_1521 (size=12151) 2024-11-20T22:25:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:46,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141606740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141606741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141606741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141606741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141606741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141606944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141606947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141606947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141606948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:46,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:46,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141606949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,062 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/35a27dbf25b74fff8fdec5b919157735 2024-11-20T22:25:47,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/a5e25116cc504e98ac2316ada000c7d9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/a5e25116cc504e98ac2316ada000c7d9 2024-11-20T22:25:47,069 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/a5e25116cc504e98ac2316ada000c7d9, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T22:25:47,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6096e5ef1c8a45b9a2675014734037c6 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6096e5ef1c8a45b9a2675014734037c6 2024-11-20T22:25:47,074 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6096e5ef1c8a45b9a2675014734037c6, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T22:25:47,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/35a27dbf25b74fff8fdec5b919157735 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/35a27dbf25b74fff8fdec5b919157735 2024-11-20T22:25:47,078 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/35a27dbf25b74fff8fdec5b919157735, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T22:25:47,079 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 62dd6d22774f5784522279eafe291710 in 524ms, sequenceid=193, compaction requested=false 2024-11-20T22:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
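For context on the "Over memstore limit=512.0 K" rejections above: HRegion.checkResources blocks writes once a region's memstore passes its blocking size, which is the configured flush size multiplied by the block multiplier. The exact values this test uses are not visible in this excerpt; the sketch below uses the two standard region-server properties (normally set in hbase-site.xml, not in client code) with assumed numbers that happen to reproduce a 512 KB blocking size, only to illustrate the arithmetic:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold, in bytes. 128 KB is an assumed, test-sized
        // value, not something read from this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier; 128 KB * 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" messages above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore size: " + blocking + " bytes");
      }
    }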
2024-11-20T22:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-20T22:25:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-20T22:25:47,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T22:25:47,088 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 996 msec 2024-11-20T22:25:47,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.0070 sec 2024-11-20T22:25:47,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T22:25:47,192 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T22:25:47,193 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:47,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T22:25:47,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:47,194 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:47,194 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:47,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:47,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:25:47,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:47,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:47,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:47,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:47,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 
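The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entries correspond to an administrative flush request, which the master turns into the FlushTableProcedure/FlushRegionProcedure pairs logged here (procId 130/131 completed, then 132/133 stored). A minimal client-side equivalent, assuming default connection settings:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table; the master drives it through
          // FlushTableProcedure and per-region FlushRegionProcedure subprocedures, and the
          // client waits on the returned operation as HBaseAdmin$TableFuture does above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }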
2024-11-20T22:25:47,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:47,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:47,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c1038d373fc749d483872ea0342a413b is 50, key is test_row_0/A:col10/1732141546625/Put/seqid=0 2024-11-20T22:25:47,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742346_1522 (size=14541) 2024-11-20T22:25:47,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:47,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141607286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141607287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141607288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141607296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141607296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,346 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:47,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141607396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141607397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141607397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141607402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141607402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:47,498 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:47,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:47,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
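The FlushRegionCallable entries, the "NOT flushing ... as already flushing" responses and the master's "Remote procedure failed, pid=133" lines are the server side of a table flush procedure that the master keeps re-dispatching (the timestamps show it being retried at roughly 150 ms intervals) until the region's in-flight flush finishes. Outside the test harness, such a flush can be requested through the public Admin API; a minimal sketch, assuming a reachable cluster and the table name taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a flush procedure for each region of the table;
          // if a region is already flushing, the region server reports that back and
          // the master retries, which is the pattern the pid=133 entries above show.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }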
2024-11-20T22:25:47,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141607603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141607604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141607604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141607609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141607609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,650 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,651 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
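Each rejected write above reports "Over memstore limit=512.0 K". In HBase that blocking threshold is the per-region memstore flush size multiplied by the block multiplier, so the small 512 K figure presumably comes from test-specific settings that are not visible in this excerpt. A hedged sketch of the two relevant configuration keys, shown with their stock defaults rather than the test's values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Size at which a region's memstore is flushed (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (default multiplier is 4).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
      }
    }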
2024-11-20T22:25:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c1038d373fc749d483872ea0342a413b 2024-11-20T22:25:47,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/5e339d2ee5904e2fadbfa6c32c4b4dac is 50, key is test_row_0/B:col10/1732141546625/Put/seqid=0 2024-11-20T22:25:47,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742347_1523 (size=12151) 2024-11-20T22:25:47,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:47,803 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:47,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141607908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141607909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141607909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141607915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:47,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141607917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,956 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:47,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:47,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:47,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/5e339d2ee5904e2fadbfa6c32c4b4dac 2024-11-20T22:25:48,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/03506d0415774565bb08a387a21aa030 is 50, key is test_row_0/C:col10/1732141546625/Put/seqid=0 2024-11-20T22:25:48,107 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:48,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:48,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:48,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742348_1524 (size=12151) 2024-11-20T22:25:48,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:48,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:48,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
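The loop repeating above is the master-side retry of the region flush: pid=133 (a FlushRegionProcedure child of FlushTableProcedure pid=132) is dispatched to the region server, FlushRegionCallable finds the region "already flushing" because the MemStoreFlusher flush is still running, the callable fails with the IOException, and the master re-dispatches it until the in-progress flush finishes (it succeeds further down, after which pid=132 completes as well). From the client's point of view the whole exchange is driven by a single synchronous admin flush request; a minimal sketch, assuming default connection settings that are not shown in this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a flush procedure on the master and waits for it,
                // i.e. for all of its per-region flush children, to finish.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }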
2024-11-20T22:25:48,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:48,415 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:48,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:48,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141608418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141608419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141608422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141608425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:48,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141608426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/03506d0415774565bb08a387a21aa030 2024-11-20T22:25:48,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c1038d373fc749d483872ea0342a413b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c1038d373fc749d483872ea0342a413b 2024-11-20T22:25:48,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c1038d373fc749d483872ea0342a413b, entries=200, sequenceid=211, filesize=14.2 K 2024-11-20T22:25:48,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/5e339d2ee5904e2fadbfa6c32c4b4dac as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/5e339d2ee5904e2fadbfa6c32c4b4dac 2024-11-20T22:25:48,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/5e339d2ee5904e2fadbfa6c32c4b4dac, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:25:48,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/03506d0415774565bb08a387a21aa030 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/03506d0415774565bb08a387a21aa030 2024-11-20T22:25:48,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/03506d0415774565bb08a387a21aa030, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T22:25:48,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 62dd6d22774f5784522279eafe291710 in 1293ms, sequenceid=211, compaction requested=true 2024-11-20T22:25:48,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:48,546 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:48,546 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:48,547 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:48,547 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor 
compaction (all files) 2024-11-20T22:25:48,547 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,547 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3fb55e98ef5143fa8d7a53e44f6c6361, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/a5e25116cc504e98ac2316ada000c7d9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c1038d373fc749d483872ea0342a413b] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=38.1 K 2024-11-20T22:25:48,547 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fb55e98ef5143fa8d7a53e44f6c6361, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141544805 2024-11-20T22:25:48,548 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:48,548 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5e25116cc504e98ac2316ada000c7d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732141545961 2024-11-20T22:25:48,548 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:48,548 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:48,548 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0a39da65585441069c6010402b12c78b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6096e5ef1c8a45b9a2675014734037c6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/5e339d2ee5904e2fadbfa6c32c4b4dac] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.8 K 2024-11-20T22:25:48,552 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a39da65585441069c6010402b12c78b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141544805 2024-11-20T22:25:48,552 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1038d373fc749d483872ea0342a413b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141546623 2024-11-20T22:25:48,553 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6096e5ef1c8a45b9a2675014734037c6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732141545961 2024-11-20T22:25:48,553 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e339d2ee5904e2fadbfa6c32c4b4dac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141546623 2024-11-20T22:25:48,565 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:48,565 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f02a7d48df5d4b999a2142782a24feae is 50, key is test_row_0/A:col10/1732141546625/Put/seqid=0 2024-11-20T22:25:48,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:48,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T22:25:48,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:48,572 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:25:48,572 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#439 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:48,572 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6b5391098b174ffdac98243a0af29bba is 50, key is test_row_0/B:col10/1732141546625/Put/seqid=0 2024-11-20T22:25:48,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742349_1525 (size=12459) 2024-11-20T22:25:48,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742350_1526 (size=12459) 2024-11-20T22:25:48,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/51fccc25554a4d6a969cd8764d78a9bd is 50, key is test_row_0/A:col10/1732141547273/Put/seqid=0 2024-11-20T22:25:48,647 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6b5391098b174ffdac98243a0af29bba as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6b5391098b174ffdac98243a0af29bba 2024-11-20T22:25:48,655 INFO 
[RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 6b5391098b174ffdac98243a0af29bba(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:48,655 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:48,655 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141548546; duration=0sec 2024-11-20T22:25:48,655 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:48,655 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:48,655 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:48,656 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:48,656 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:48,656 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:48,656 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8a79fa88e7a947e8ac9326bb361c9b23, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/35a27dbf25b74fff8fdec5b919157735, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/03506d0415774565bb08a387a21aa030] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.8 K 2024-11-20T22:25:48,656 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a79fa88e7a947e8ac9326bb361c9b23, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732141544805 2024-11-20T22:25:48,657 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 35a27dbf25b74fff8fdec5b919157735, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732141545961 2024-11-20T22:25:48,657 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 03506d0415774565bb08a387a21aa030, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141546623 2024-11-20T22:25:48,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742351_1527 (size=12151) 2024-11-20T22:25:48,669 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/51fccc25554a4d6a969cd8764d78a9bd 2024-11-20T22:25:48,691 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#441 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:48,691 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/9465ea275b94404fa16672b9a86376f3 is 50, key is test_row_0/C:col10/1732141546625/Put/seqid=0 2024-11-20T22:25:48,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6237b06610054348bc1e56f2067f5721 is 50, key is test_row_0/B:col10/1732141547273/Put/seqid=0 2024-11-20T22:25:48,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742353_1529 (size=12151) 2024-11-20T22:25:48,726 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6237b06610054348bc1e56f2067f5721 2024-11-20T22:25:48,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742352_1528 (size=12459) 2024-11-20T22:25:48,750 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/9465ea275b94404fa16672b9a86376f3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9465ea275b94404fa16672b9a86376f3 2024-11-20T22:25:48,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/81f7aa0c149b433f8085fa44703567df is 50, key is test_row_0/C:col10/1732141547273/Put/seqid=0 2024-11-20T22:25:48,761 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into 9465ea275b94404fa16672b9a86376f3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
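Each flush above leaves one new HFile per store, so stores A, B and C each reach three files of roughly 12 K; three eligible files meets the minimum file count for a minor compaction (three by default), which is why the ExploringCompactionPolicy immediately selects all of them and rewrites them into a single file of about 12.2 K per store, presumably because the writers keep updating the same small set of test_row_* keys and most cells are superseded versions. A compaction can also be requested explicitly through the Admin API; a minimal sketch, with the same assumed connection boilerplate as in the flush example:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactTestTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Asks the region servers to queue a compaction of the whole
                // table, or of a single column family such as 'A'.
                admin.compact(table);
                admin.compact(table, Bytes.toBytes("A"));
            }
        }
    }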
2024-11-20T22:25:48,761 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:48,761 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141548546; duration=0sec 2024-11-20T22:25:48,761 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:48,761 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:48,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742354_1530 (size=12151) 2024-11-20T22:25:48,784 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/81f7aa0c149b433f8085fa44703567df 2024-11-20T22:25:48,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/51fccc25554a4d6a969cd8764d78a9bd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/51fccc25554a4d6a969cd8764d78a9bd 2024-11-20T22:25:48,799 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/51fccc25554a4d6a969cd8764d78a9bd, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T22:25:48,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6237b06610054348bc1e56f2067f5721 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6237b06610054348bc1e56f2067f5721 2024-11-20T22:25:48,816 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6237b06610054348bc1e56f2067f5721, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T22:25:48,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/81f7aa0c149b433f8085fa44703567df as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/81f7aa0c149b433f8085fa44703567df 2024-11-20T22:25:48,834 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/81f7aa0c149b433f8085fa44703567df, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T22:25:48,835 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for 62dd6d22774f5784522279eafe291710 in 264ms, sequenceid=232, compaction requested=false 2024-11-20T22:25:48,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:48,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:48,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T22:25:48,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T22:25:48,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T22:25:48,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6410 sec 2024-11-20T22:25:48,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.6450 sec 2024-11-20T22:25:49,005 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f02a7d48df5d4b999a2142782a24feae as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f02a7d48df5d4b999a2142782a24feae 2024-11-20T22:25:49,015 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into f02a7d48df5d4b999a2142782a24feae(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:49,015 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:49,015 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141548546; duration=0sec 2024-11-20T22:25:49,015 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:49,015 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T22:25:49,298 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T22:25:49,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:49,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T22:25:49,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:49,312 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:49,315 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:49,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:49,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:49,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:25:49,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:49,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:49,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, 
store=B 2024-11-20T22:25:49,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:49,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:49,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:49,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/4e783a4660d54de299b974a98f2c8ed6 is 50, key is test_row_0/A:col10/1732141549453/Put/seqid=0 2024-11-20T22:25:49,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:49,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:49,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:49,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742355_1531 (size=12151) 2024-11-20T22:25:49,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141609492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141609495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141609495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141609496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141609497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141609606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141609606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141609607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:49,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141609615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141609615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:49,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,786 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:49,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141609812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141609813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141609815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141609823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:49,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141609825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/4e783a4660d54de299b974a98f2c8ed6 2024-11-20T22:25:49,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:49,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/4479633cf2fb4dfba4f71e7feff74fd5 is 50, key is test_row_0/B:col10/1732141549453/Put/seqid=0 2024-11-20T22:25:49,940 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:49,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:49,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742356_1532 (size=12151) 2024-11-20T22:25:49,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/4479633cf2fb4dfba4f71e7feff74fd5 2024-11-20T22:25:49,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/0b8015a4a1a7490181930cc7814bdbd7 is 50, key is test_row_0/C:col10/1732141549453/Put/seqid=0 2024-11-20T22:25:49,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742357_1533 (size=12151) 2024-11-20T22:25:50,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
as already flushing 2024-11-20T22:25:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:50,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141610118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141610120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141610121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141610129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141610131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,246 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:50,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:50,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:50,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:50,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/0b8015a4a1a7490181930cc7814bdbd7 2024-11-20T22:25:50,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/4e783a4660d54de299b974a98f2c8ed6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/4e783a4660d54de299b974a98f2c8ed6 2024-11-20T22:25:50,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/4e783a4660d54de299b974a98f2c8ed6, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T22:25:50,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T22:25:50,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:50,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/4479633cf2fb4dfba4f71e7feff74fd5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4479633cf2fb4dfba4f71e7feff74fd5 2024-11-20T22:25:50,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:50,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:50,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:50,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:50,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:50,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4479633cf2fb4dfba4f71e7feff74fd5, entries=150, sequenceid=248, filesize=11.9 K
2024-11-20T22:25:50,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/0b8015a4a1a7490181930cc7814bdbd7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0b8015a4a1a7490181930cc7814bdbd7
2024-11-20T22:25:50,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0b8015a4a1a7490181930cc7814bdbd7, entries=150, sequenceid=248, filesize=11.9 K
2024-11-20T22:25:50,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 62dd6d22774f5784522279eafe291710 in 950ms, sequenceid=248, compaction requested=true
2024-11-20T22:25:50,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710:
2024-11-20T22:25:50,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1
2024-11-20T22:25:50,405 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:50,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:50,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T22:25:50,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T22:25:50,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T22:25:50,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files)
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files)
2024-11-20T22:25:50,406 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.
2024-11-20T22:25:50,406 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.
2024-11-20T22:25:50,406 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6b5391098b174ffdac98243a0af29bba, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6237b06610054348bc1e56f2067f5721, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4479633cf2fb4dfba4f71e7feff74fd5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.9 K
2024-11-20T22:25:50,406 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f02a7d48df5d4b999a2142782a24feae, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/51fccc25554a4d6a969cd8764d78a9bd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/4e783a4660d54de299b974a98f2c8ed6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.9 K
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b5391098b174ffdac98243a0af29bba, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141546623
2024-11-20T22:25:50,406 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f02a7d48df5d4b999a2142782a24feae, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141546623
2024-11-20T22:25:50,407 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6237b06610054348bc1e56f2067f5721, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732141547273
2024-11-20T22:25:50,407 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51fccc25554a4d6a969cd8764d78a9bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732141547273
2024-11-20T22:25:50,407 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4479633cf2fb4dfba4f71e7feff74fd5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141549442
2024-11-20T22:25:50,407 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e783a4660d54de299b974a98f2c8ed6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141549442
2024-11-20T22:25:50,411 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:50,412 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/7bb95efd03d84957a707789c6ac51ccd is 50, key is test_row_0/B:col10/1732141549453/Put/seqid=0
2024-11-20T22:25:50,412 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#448 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:50,412 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/40bcb49aaa594b9b8ad70e31ca0ed04b is 50, key is test_row_0/A:col10/1732141549453/Put/seqid=0
2024-11-20T22:25:50,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134
2024-11-20T22:25:50,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742358_1534 (size=12561)
2024-11-20T22:25:50,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742359_1535 (size=12561)
2024-11-20T22:25:50,420 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/7bb95efd03d84957a707789c6ac51ccd as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/7bb95efd03d84957a707789c6ac51ccd
2024-11-20T22:25:50,420 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/40bcb49aaa594b9b8ad70e31ca0ed04b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/40bcb49aaa594b9b8ad70e31ca0ed04b
2024-11-20T22:25:50,423 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 7bb95efd03d84957a707789c6ac51ccd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:50,423 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:50,423 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141550405; duration=0sec 2024-11-20T22:25:50,423 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:50,423 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:50,423 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:50,424 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:50,424 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:50,424 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:50,424 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 40bcb49aaa594b9b8ad70e31ca0ed04b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:50,424 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9465ea275b94404fa16672b9a86376f3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/81f7aa0c149b433f8085fa44703567df, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0b8015a4a1a7490181930cc7814bdbd7] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=35.9 K
2024-11-20T22:25:50,424 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710:
2024-11-20T22:25:50,424 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141550405; duration=0sec
2024-11-20T22:25:50,424 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:50,424 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A
2024-11-20T22:25:50,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9465ea275b94404fa16672b9a86376f3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732141546623
2024-11-20T22:25:50,425 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 81f7aa0c149b433f8085fa44703567df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732141547273
2024-11-20T22:25:50,426 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b8015a4a1a7490181930cc7814bdbd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141549442
2024-11-20T22:25:50,434 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T22:25:50,435 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/70533e668d77489d9d36e0c01b003a88 is 50, key is test_row_0/C:col10/1732141549453/Put/seqid=0
2024-11-20T22:25:50,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742360_1536 (size=12561)
2024-11-20T22:25:50,474 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/70533e668d77489d9d36e0c01b003a88 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/70533e668d77489d9d36e0c01b003a88
2024-11-20T22:25:50,479 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into 70533e668d77489d9d36e0c01b003a88(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T22:25:50,480 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710:
2024-11-20T22:25:50,480 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141550406; duration=0sec
2024-11-20T22:25:50,480 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T22:25:50,480 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C
2024-11-20T22:25:50,550 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048
2024-11-20T22:25:50,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135
2024-11-20T22:25:50,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.
2024-11-20T22:25:50,551 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T22:25:50,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:50,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:50,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:50,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:50,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:50,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:50,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3ba7e998c60e43e7b46a322f9172ca5e is 50, key is test_row_0/A:col10/1732141549495/Put/seqid=0 2024-11-20T22:25:50,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742361_1537 (size=12301) 2024-11-20T22:25:50,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:50,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141610642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141610643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141610643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141610644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141610647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141610746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141610748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141610750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141610750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141610750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141610953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141610953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141610953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,960 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3ba7e998c60e43e7b46a322f9172ca5e 2024-11-20T22:25:50,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141610955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:50,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141610955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:50,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/2fa6a61401814e4d98b9816104edb5dc is 50, key is test_row_0/B:col10/1732141549495/Put/seqid=0 2024-11-20T22:25:50,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742362_1538 (size=12301) 2024-11-20T22:25:51,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141611259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141611260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141611261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141611261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141611263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,372 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/2fa6a61401814e4d98b9816104edb5dc 2024-11-20T22:25:51,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/fa7b3c5045664841bc3e5503ba5fc28e is 50, key is test_row_0/C:col10/1732141549495/Put/seqid=0 2024-11-20T22:25:51,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742363_1539 (size=12301) 2024-11-20T22:25:51,382 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/fa7b3c5045664841bc3e5503ba5fc28e 2024-11-20T22:25:51,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3ba7e998c60e43e7b46a322f9172ca5e as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3ba7e998c60e43e7b46a322f9172ca5e 2024-11-20T22:25:51,389 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3ba7e998c60e43e7b46a322f9172ca5e, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T22:25:51,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/2fa6a61401814e4d98b9816104edb5dc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fa6a61401814e4d98b9816104edb5dc 2024-11-20T22:25:51,393 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fa6a61401814e4d98b9816104edb5dc, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T22:25:51,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/fa7b3c5045664841bc3e5503ba5fc28e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/fa7b3c5045664841bc3e5503ba5fc28e 2024-11-20T22:25:51,396 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/fa7b3c5045664841bc3e5503ba5fc28e, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T22:25:51,397 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 62dd6d22774f5784522279eafe291710 in 845ms, sequenceid=274, compaction requested=false 2024-11-20T22:25:51,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:51,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
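The RegionTooBusyException storms above all originate in HRegion.checkResources(HRegion.java:5067), which rejects writes once the region's memstore passes its blocking limit; the "Over memstore limit=512.0 K" figure is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier (this test presumably uses a deliberately small flush size so the limit is hit quickly). The sketch below is a hypothetical client-side view of the same situation using only the public HBase client API; the stock client already retries RegionTooBusyException internally, so the explicit retry loop is illustrative rather than required, and the table, row, and column names are simply the ones appearing in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed tuning, not read from this run: the blocking limit logged as
    // "Over memstore limit=512.0 K" is flush.size * block.multiplier.
    // conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put); // the client also retries internally before surfacing errors
          break;
        } catch (RegionTooBusyException e) {
          // Region memstore is over its blocking limit; back off and let the
          // in-flight flush (pid=135 above) drain it, then try again.
          if (++attempts > 5) {
            throw e;
          }
          Thread.sleep(100L * attempts);
        }
      }
    }
  }
}
```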
2024-11-20T22:25:51,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T22:25:51,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T22:25:51,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T22:25:51,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0830 sec 2024-11-20T22:25:51,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.0980 sec 2024-11-20T22:25:51,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T22:25:51,413 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T22:25:51,414 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:51,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T22:25:51,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:51,416 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:51,416 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:51,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:51,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:51,567 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T22:25:51,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
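The pid=136/137 procedure pair above is another client-requested flush (the master logs Client=jenkins//172.17.0.2 flush TestAcidGuarantees, stores a FlushTableProcedure, and fans out a FlushRegionProcedure to the region server). A minimal sketch of issuing the same kind of flush through the public Admin API follows; the connection setup is assumed, and only the table name is taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure finishes; the log above
      // shows the matching wait as HBaseAdmin$TableFuture reporting
      // "Operation: FLUSH ... procId: 134 completed".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```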
2024-11-20T22:25:51,568 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T22:25:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:51,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/28e278b7cf394dc08424d7f3184ed7b4 is 50, key is test_row_0/A:col10/1732141550643/Put/seqid=0 2024-11-20T22:25:51,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742364_1540 (size=12301) 2024-11-20T22:25:51,578 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/28e278b7cf394dc08424d7f3184ed7b4 2024-11-20T22:25:51,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a39f123da83241d5a9659c7095138f75 is 50, key is test_row_0/B:col10/1732141550643/Put/seqid=0 2024-11-20T22:25:51,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742365_1541 (size=12301) 2024-11-20T22:25:51,617 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a39f123da83241d5a9659c7095138f75 2024-11-20T22:25:51,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/5d99220ec28e48aaac65c08f7668d233 is 50, key is test_row_0/C:col10/1732141550643/Put/seqid=0 2024-11-20T22:25:51,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742366_1542 (size=12301) 2024-11-20T22:25:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:51,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:51,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141611819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141611821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141611827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141611829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141611830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141611930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141611931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141611931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141611932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:51,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:51,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141611940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:52,066 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/5d99220ec28e48aaac65c08f7668d233 2024-11-20T22:25:52,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/28e278b7cf394dc08424d7f3184ed7b4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/28e278b7cf394dc08424d7f3184ed7b4 2024-11-20T22:25:52,081 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/28e278b7cf394dc08424d7f3184ed7b4, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T22:25:52,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a39f123da83241d5a9659c7095138f75 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a39f123da83241d5a9659c7095138f75 2024-11-20T22:25:52,089 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a39f123da83241d5a9659c7095138f75, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T22:25:52,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/5d99220ec28e48aaac65c08f7668d233 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5d99220ec28e48aaac65c08f7668d233 2024-11-20T22:25:52,100 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5d99220ec28e48aaac65c08f7668d233, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T22:25:52,101 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 62dd6d22774f5784522279eafe291710 in 534ms, sequenceid=287, compaction requested=true 2024-11-20T22:25:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
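The record above completes pid=137: all three column families (A, B, C) of region 62dd6d22774f5784522279eafe291710 were flushed at sequenceid=287 (~67.09 KB of data) and a compaction was requested. The table flush tracked by pid=136/137 was requested by the test client against TestAcidGuarantees, and the master turns such a request into a FlushTableProcedure with one FlushRegionProcedure subprocedure per region, which is the pattern visible throughout this log. As a rough illustration of the client side only, the Java sketch below issues an equivalent flush through the standard Admin API. The class name is made up for the example and the connection setup assumes an hbase-site.xml on the classpath; nothing here is taken from the test code itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Ask the master to flush every region of the table; this is the kind of
                // request that appears above as a FlushTableProcedure plus its
                // FlushRegionProcedure subprocedures.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The flush call blocks until the procedure finishes, which corresponds to the repeated "Checking to see if procedure is done pid=..." polling and the HBaseAdmin$TableFuture "Operation: FLUSH ... completed" line further down in this log.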
2024-11-20T22:25:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T22:25:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T22:25:52,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T22:25:52,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 686 msec 2024-11-20T22:25:52,106 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 691 msec 2024-11-20T22:25:52,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:52,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:25:52,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:52,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:52,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:52,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:52,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c058cc32d7a049c5aed37f46783d8718 is 50, key is test_row_0/A:col10/1732141552148/Put/seqid=0 2024-11-20T22:25:52,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141612191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141612199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141612200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141612201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141612204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742367_1543 (size=14741) 2024-11-20T22:25:52,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141612306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141612312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141612314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141612316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141612324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T22:25:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141612515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,519 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T22:25:52,520 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T22:25:52,521 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:52,522 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:52,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:52,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141612519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141612524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141612524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141612531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:52,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c058cc32d7a049c5aed37f46783d8718 2024-11-20T22:25:52,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/59a290b7bde344188ce8b0f910b7d81b is 50, key is test_row_0/B:col10/1732141552148/Put/seqid=0 2024-11-20T22:25:52,674 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:52,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:52,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:52,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:52,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742368_1544 (size=12301) 2024-11-20T22:25:52,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:52,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141612824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,827 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:52,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
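In parallel with the failing flush attempts for pid=139 ("Unable to complete flush ... as already flushing"), the handler threads keep rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") because the region's memstore is over its blocking limit. From a writer's point of view these rejections are retryable IOExceptions; the standard HBase client retries them internally, governed by settings such as hbase.client.retries.number and hbase.client.pause. The Java sketch below only makes that backoff explicit for illustration; the helper name, attempt count, and pause values are invented, not taken from this test.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {

        // Hypothetical helper: retries a single put with exponential backoff when the
        // write is rejected, e.g. while the region is over its memstore blocking limit.
        static void putWithBackoff(Connection connection, TableName tableName, Put put)
                throws IOException, InterruptedException {
            long pauseMs = 100;                          // starting pause, doubled per attempt
            IOException last = null;
            try (Table table = connection.getTable(tableName)) {
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);                   // succeeds once a flush has drained the memstore
                        return;
                    } catch (IOException e) {             // e.g. a RegionTooBusyException surfaced by the client
                        last = e;
                        Thread.sleep(pauseMs);
                        pauseMs *= 2;
                    }
                }
            }
            throw last;
        }

        public static void main(String[] args) {
            // A cell shaped like the test rows in this log: row "test_row_0",
            // family "A", qualifier "col10"; the value is illustrative.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            // putWithBackoff(connection, TableName.valueOf("TestAcidGuarantees"), put);
        }
    }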
2024-11-20T22:25:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141612831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141612838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141612838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:52,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141612842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:52,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:52,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:52,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:52,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:52,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:52,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/59a290b7bde344188ce8b0f910b7d81b 2024-11-20T22:25:53,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/c720defc07ac44c294eaa1ef7f6d2f6f is 50, key is test_row_0/C:col10/1732141552148/Put/seqid=0 2024-11-20T22:25:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:53,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:53,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:53,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:53,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742369_1545 (size=12301) 2024-11-20T22:25:53,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/c720defc07ac44c294eaa1ef7f6d2f6f 2024-11-20T22:25:53,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c058cc32d7a049c5aed37f46783d8718 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c058cc32d7a049c5aed37f46783d8718 2024-11-20T22:25:53,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c058cc32d7a049c5aed37f46783d8718, entries=200, sequenceid=314, filesize=14.4 K 2024-11-20T22:25:53,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/59a290b7bde344188ce8b0f910b7d81b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/59a290b7bde344188ce8b0f910b7d81b 2024-11-20T22:25:53,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/59a290b7bde344188ce8b0f910b7d81b, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T22:25:53,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/c720defc07ac44c294eaa1ef7f6d2f6f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/c720defc07ac44c294eaa1ef7f6d2f6f 2024-11-20T22:25:53,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/c720defc07ac44c294eaa1ef7f6d2f6f, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T22:25:53,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 62dd6d22774f5784522279eafe291710 in 976ms, sequenceid=314, compaction requested=true 2024-11-20T22:25:53,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:53,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:53,161 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:53,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:53,161 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:53,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:53,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:53,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:53,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:53,162 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49464 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:53,162 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:53,162 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
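The repeated RegionTooBusyException entries in this log come from HRegion.checkResources (visible at the top of each of those stack traces) refusing writes once the region's memstore passes its blocking threshold, which HBase derives from the configured flush size multiplied by the block multiplier. Below is a minimal sketch of that arithmetic, assuming the test lowers hbase.hregion.memstore.flush.size to 128 KB and keeps the default hbase.hregion.memstore.block.multiplier of 4; only the 512 K product is actually visible in the log, so the individual values are an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Illustrates how the 512 K blocking limit reported above can arise:
 * blockingLimit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
 * The concrete values set here are assumptions for illustration; only the product (512 KB)
 * appears in the log output.
 */
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed test value: 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockingLimit = flushSize * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // 128 KB * 4 = 524288 bytes = 512.0 K, the limit named by the RegionTooBusyException above.
    System.out.println("Blocking memstore limit = " + (blockingLimit / 1024.0) + " K");
  }
}

Writes are accepted again once a flush drains the region's memstore back under this limit.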
2024-11-20T22:25:53,162 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/7bb95efd03d84957a707789c6ac51ccd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fa6a61401814e4d98b9816104edb5dc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a39f123da83241d5a9659c7095138f75, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/59a290b7bde344188ce8b0f910b7d81b] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=48.3 K 2024-11-20T22:25:53,167 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51904 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:53,167 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:53,167 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:53,167 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/40bcb49aaa594b9b8ad70e31ca0ed04b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3ba7e998c60e43e7b46a322f9172ca5e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/28e278b7cf394dc08424d7f3184ed7b4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c058cc32d7a049c5aed37f46783d8718] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=50.7 K 2024-11-20T22:25:53,168 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bb95efd03d84957a707789c6ac51ccd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141549442 2024-11-20T22:25:53,168 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40bcb49aaa594b9b8ad70e31ca0ed04b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141549442 2024-11-20T22:25:53,168 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ba7e998c60e43e7b46a322f9172ca5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, 
earliestPutTs=1732141549494 2024-11-20T22:25:53,168 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fa6a61401814e4d98b9816104edb5dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141549494 2024-11-20T22:25:53,168 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28e278b7cf394dc08424d7f3184ed7b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141550636 2024-11-20T22:25:53,169 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a39f123da83241d5a9659c7095138f75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141550636 2024-11-20T22:25:53,169 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c058cc32d7a049c5aed37f46783d8718, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732141551828 2024-11-20T22:25:53,169 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 59a290b7bde344188ce8b0f910b7d81b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732141552137 2024-11-20T22:25:53,193 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#459 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:53,194 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/9f676883717e4c7a8e10a924c394d03c is 50, key is test_row_0/A:col10/1732141552148/Put/seqid=0 2024-11-20T22:25:53,202 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#460 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:53,203 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/fb53c712850841e9910b21bb1fd4c679 is 50, key is test_row_0/B:col10/1732141552148/Put/seqid=0 2024-11-20T22:25:53,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742370_1546 (size=12847) 2024-11-20T22:25:53,271 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/9f676883717e4c7a8e10a924c394d03c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/9f676883717e4c7a8e10a924c394d03c 2024-11-20T22:25:53,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742371_1547 (size=12847) 2024-11-20T22:25:53,276 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 9f676883717e4c7a8e10a924c394d03c(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:53,276 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:53,277 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=12, startTime=1732141553160; duration=0sec 2024-11-20T22:25:53,277 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:53,277 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:53,277 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:25:53,280 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49464 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:25:53,280 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:53,280 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
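The ExploringCompactionPolicy lines above report selecting all four store files after checking three permutations against the size ratio. The sketch below is a deliberately simplified, self-contained illustration of that kind of ratio-based selection over contiguous file runs; it is not the HBase implementation, and the RATIO, MIN_FILES, and MAX_FILES constants as well as the per-file sizes are assumptions (the sizes are only chosen so their total matches the 49464 bytes reported for the B and C selections).

import java.util.Arrays;

/** Simplified sketch of ratio-based selection over contiguous store-file runs. */
public class CompactionSelectionSketch {
  static final double RATIO = 1.2;  // assumed ratio, analogous to hbase.hstore.compaction.ratio
  static final int MIN_FILES = 3;   // assumed minimum files per compaction
  static final int MAX_FILES = 10;  // assumed maximum files per compaction

  /** Returns the chosen run as [startIndex, endIndexExclusive], or null if nothing qualifies. */
  static int[] select(long[] sizes) {
    int[] best = null;
    long bestTotal = Long.MAX_VALUE;
    for (int start = 0; start < sizes.length; start++) {
      for (int end = start + MIN_FILES; end <= Math.min(sizes.length, start + MAX_FILES); end++) {
        long total = 0;
        for (int i = start; i < end; i++) total += sizes[i];
        // A run qualifies only if no single file dwarfs the rest of the selection.
        boolean inRatio = true;
        for (int i = start; i < end; i++) {
          if (sizes[i] > RATIO * (total - sizes[i])) { inRatio = false; break; }
        }
        if (!inRatio) continue;
        int files = end - start;
        // Prefer more files, then the smaller total size.
        if (best == null || files > best[1] - best[0]
            || (files == best[1] - best[0] && total < bestTotal)) {
          best = new int[] { start, end };
          bestTotal = total;
        }
      }
    }
    return best;
  }

  public static void main(String[] args) {
    // Hypothetical per-file sizes whose sum equals the 49464 bytes reported above.
    long[] sizes = { 12601, 12287, 12287, 12289 };
    System.out.println(Arrays.toString(select(sizes)));  // expect [0, 4]: all four files selected
  }
}

The rule of thumb captured here is that a run is accepted only when every file is at most RATIO times the combined size of the others, and among qualifying runs the selector prefers more files and then the smaller total.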
2024-11-20T22:25:53,280 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/70533e668d77489d9d36e0c01b003a88, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/fa7b3c5045664841bc3e5503ba5fc28e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5d99220ec28e48aaac65c08f7668d233, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/c720defc07ac44c294eaa1ef7f6d2f6f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=48.3 K 2024-11-20T22:25:53,281 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70533e668d77489d9d36e0c01b003a88, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732141549442 2024-11-20T22:25:53,281 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa7b3c5045664841bc3e5503ba5fc28e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732141549494 2024-11-20T22:25:53,282 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d99220ec28e48aaac65c08f7668d233, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732141550636 2024-11-20T22:25:53,282 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c720defc07ac44c294eaa1ef7f6d2f6f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732141552137 2024-11-20T22:25:53,284 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/fb53c712850841e9910b21bb1fd4c679 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fb53c712850841e9910b21bb1fd4c679 2024-11-20T22:25:53,284 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
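The "Committing ... .tmp/B/... as ... /B/..." lines above, like the earlier flush commits, follow a write-then-rename pattern: a new store file is built under the region's .tmp directory and only moved into the column-family directory once it is complete, so readers never see a partially written file. The sketch below illustrates that pattern with plain Hadoop FileSystem calls; the class, paths, and helper are invented for the example and this is not HBase's HRegionFileSystem code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch of the write-to-.tmp-then-commit pattern suggested by the log, not HBase's own code. */
public class TmpCommitSketch {
  /** Writes the payload to a temporary path first, then renames it into its final location. */
  static void writeAndCommit(FileSystem fs, Path tmpFile, Path finalFile, byte[] payload)
      throws IOException {
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);                  // only visible under .tmp while incomplete
    }
    if (!fs.rename(tmpFile, finalFile)) {  // move into the final directory (atomic on HDFS)
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);  // local FS for the example; the log uses HDFS
    Path tmp = new Path("/tmp/region/.tmp/B/examplefile");
    Path dst = new Path("/tmp/region/B/examplefile");
    fs.mkdirs(dst.getParent());
    writeAndCommit(fs, tmp, dst, "example".getBytes(StandardCharsets.UTF_8));
    System.out.println("Committed " + dst + ", exists=" + fs.exists(dst));
  }
}

On HDFS the rename is a metadata-only operation, which is why committing a flushed or compacted file is quick compared with writing it.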
2024-11-20T22:25:53,286 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:53,301 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into fb53c712850841e9910b21bb1fd4c679(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:53,301 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:53,301 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=12, startTime=1732141553161; duration=0sec 2024-11-20T22:25:53,302 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:53,302 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:53,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c69e44bba74f4d198e6bfa78a09bef0a is 50, key is test_row_0/A:col10/1732141552198/Put/seqid=0 2024-11-20T22:25:53,321 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:53,321 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/b2eb7f4ceb9340ae989c3293aba2add0 is 50, key is test_row_0/C:col10/1732141552148/Put/seqid=0 2024-11-20T22:25:53,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:53,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:53,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742372_1548 (size=12301) 2024-11-20T22:25:53,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742373_1549 (size=12847) 2024-11-20T22:25:53,384 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/b2eb7f4ceb9340ae989c3293aba2add0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/b2eb7f4ceb9340ae989c3293aba2add0 2024-11-20T22:25:53,389 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into b2eb7f4ceb9340ae989c3293aba2add0(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:53,389 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:53,389 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=12, startTime=1732141553161; duration=0sec 2024-11-20T22:25:53,389 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:53,389 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:53,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141613399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141613399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141613401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141613407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141613408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141613512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141613512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141613512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141613512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141613523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:53,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141613718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141613718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141613718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141613719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141613729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:53,764 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c69e44bba74f4d198e6bfa78a09bef0a 2024-11-20T22:25:53,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/fd3074762e544d89ac35eeab6329ba3c is 50, key is test_row_0/B:col10/1732141552198/Put/seqid=0 2024-11-20T22:25:53,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742374_1550 (size=12301) 2024-11-20T22:25:53,831 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/fd3074762e544d89ac35eeab6329ba3c 2024-11-20T22:25:53,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/777f8e6dcec04664a6d2dd72475ec52e is 50, key is test_row_0/C:col10/1732141552198/Put/seqid=0 2024-11-20T22:25:53,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742375_1551 (size=12301) 2024-11-20T22:25:53,895 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/777f8e6dcec04664a6d2dd72475ec52e 2024-11-20T22:25:53,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/c69e44bba74f4d198e6bfa78a09bef0a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c69e44bba74f4d198e6bfa78a09bef0a 2024-11-20T22:25:53,925 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c69e44bba74f4d198e6bfa78a09bef0a, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T22:25:53,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/fd3074762e544d89ac35eeab6329ba3c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fd3074762e544d89ac35eeab6329ba3c 2024-11-20T22:25:53,932 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fd3074762e544d89ac35eeab6329ba3c, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T22:25:53,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/777f8e6dcec04664a6d2dd72475ec52e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/777f8e6dcec04664a6d2dd72475ec52e 2024-11-20T22:25:53,937 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/777f8e6dcec04664a6d2dd72475ec52e, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T22:25:53,937 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 62dd6d22774f5784522279eafe291710 in 651ms, sequenceid=324, compaction requested=false 2024-11-20T22:25:53,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:53,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
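Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects incoming Mutate RPCs while the region's memstore is above its blocking threshold (here reported as 512.0 K), and the writes only start succeeding again once the flush shown above drains the memstore. As a rough illustration only, the test's actual client code is not part of this log and the stock HBase client normally retries these failures internally, a caller that wanted to handle the condition explicitly could look like the sketch below. Table, row, and column names are taken from the log; the retry count and backoff values are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column names mirror the flush output above (test_row_0, families A/B/C).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // arbitrary starting backoff for this sketch
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // The server rejects the put with RegionTooBusyException while the
          // memstore is over its blocking limit, as seen in the log entries above.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Wait for the in-flight flush to drain the memstore, then try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

The 512.0 K figure is the product of the configured memstore flush size and the block multiplier (for example, a 128 K hbase.hregion.memstore.flush.size with hbase.hregion.memstore.block.multiplier set to 4 would give this limit); the test's actual settings are not visible in this section of the log.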
2024-11-20T22:25:53,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T22:25:53,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T22:25:53,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T22:25:53,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4160 sec 2024-11-20T22:25:53,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.4210 sec 2024-11-20T22:25:54,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:54,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:25:54,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:54,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:54,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:54,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:54,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:54,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:54,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141614029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/b8f84af128a84613adb9badb8ab809d9 is 50, key is test_row_0/A:col10/1732141554023/Put/seqid=0 2024-11-20T22:25:54,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141614032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141614032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141614034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141614036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742376_1552 (size=14741) 2024-11-20T22:25:54,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/b8f84af128a84613adb9badb8ab809d9 2024-11-20T22:25:54,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a1cfa73174d04e5cb9c578b8dac3de7c is 50, key is test_row_0/B:col10/1732141554023/Put/seqid=0 2024-11-20T22:25:54,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742377_1553 (size=12301) 2024-11-20T22:25:54,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141614135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141614143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141614144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141614144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141614341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141614351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141614351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141614351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a1cfa73174d04e5cb9c578b8dac3de7c 2024-11-20T22:25:54,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/7145c8902b944ab39777d7826cc84aa4 is 50, key is test_row_0/C:col10/1732141554023/Put/seqid=0 2024-11-20T22:25:54,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141614544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742378_1554 (size=12301) 2024-11-20T22:25:54,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T22:25:54,631 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T22:25:54,632 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:54,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T22:25:54,635 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:54,636 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:54,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:54,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:54,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141614648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141614670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141614673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:54,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141614673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:54,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:54,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:54,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:54,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:54,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:54,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:54,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:54,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:54,943 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:54,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:54,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:54,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:54,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:54,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/7145c8902b944ab39777d7826cc84aa4 2024-11-20T22:25:54,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/b8f84af128a84613adb9badb8ab809d9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/b8f84af128a84613adb9badb8ab809d9 2024-11-20T22:25:54,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/b8f84af128a84613adb9badb8ab809d9, entries=200, sequenceid=354, filesize=14.4 K 2024-11-20T22:25:54,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a1cfa73174d04e5cb9c578b8dac3de7c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a1cfa73174d04e5cb9c578b8dac3de7c 2024-11-20T22:25:55,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a1cfa73174d04e5cb9c578b8dac3de7c, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:25:55,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/7145c8902b944ab39777d7826cc84aa4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/7145c8902b944ab39777d7826cc84aa4 2024-11-20T22:25:55,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/7145c8902b944ab39777d7826cc84aa4, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T22:25:55,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 62dd6d22774f5784522279eafe291710 in 981ms, sequenceid=354, compaction requested=true 2024-11-20T22:25:55,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:55,005 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:55,006 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files 
of size 39889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:55,006 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:55,006 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:55,006 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/9f676883717e4c7a8e10a924c394d03c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c69e44bba74f4d198e6bfa78a09bef0a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/b8f84af128a84613adb9badb8ab809d9] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=39.0 K 2024-11-20T22:25:55,007 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f676883717e4c7a8e10a924c394d03c, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732141552137 2024-11-20T22:25:55,007 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c69e44bba74f4d198e6bfa78a09bef0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732141552189 2024-11-20T22:25:55,008 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8f84af128a84613adb9badb8ab809d9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141553392 2024-11-20T22:25:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:55,009 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:55,012 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:55,012 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:55,012 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:55,012 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fb53c712850841e9910b21bb1fd4c679, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fd3074762e544d89ac35eeab6329ba3c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a1cfa73174d04e5cb9c578b8dac3de7c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=36.6 K 2024-11-20T22:25:55,013 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fb53c712850841e9910b21bb1fd4c679, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732141552137 2024-11-20T22:25:55,013 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fd3074762e544d89ac35eeab6329ba3c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732141552189 2024-11-20T22:25:55,014 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a1cfa73174d04e5cb9c578b8dac3de7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141553392 2024-11-20T22:25:55,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:55,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:55,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:55,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:55,021 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:55,022 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f7ab89642fa3400ab82039d142cb84f6 is 50, key is test_row_0/A:col10/1732141554023/Put/seqid=0 2024-11-20T22:25:55,028 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:55,029 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/0211afe23629477abdb3c506cbcbd33e is 50, key is test_row_0/B:col10/1732141554023/Put/seqid=0 2024-11-20T22:25:55,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742379_1555 (size=12949) 2024-11-20T22:25:55,103 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T22:25:55,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:55,107 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:25:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:55,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ce2232d6efd7447b925595a838c21847 is 50, key is test_row_0/A:col10/1732141554031/Put/seqid=0 2024-11-20T22:25:55,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742380_1556 (size=12949) 2024-11-20T22:25:55,131 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/0211afe23629477abdb3c506cbcbd33e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0211afe23629477abdb3c506cbcbd33e 2024-11-20T22:25:55,136 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 0211afe23629477abdb3c506cbcbd33e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:55,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:55,136 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141555009; duration=0sec 2024-11-20T22:25:55,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:55,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:55,136 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:55,137 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:55,137 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:55,137 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:55,137 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/b2eb7f4ceb9340ae989c3293aba2add0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/777f8e6dcec04664a6d2dd72475ec52e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/7145c8902b944ab39777d7826cc84aa4] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=36.6 K 2024-11-20T22:25:55,138 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting b2eb7f4ceb9340ae989c3293aba2add0, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732141552137 2024-11-20T22:25:55,138 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 777f8e6dcec04664a6d2dd72475ec52e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732141552189 2024-11-20T22:25:55,138 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7145c8902b944ab39777d7826cc84aa4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141553392 2024-11-20T22:25:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742381_1557 (size=12301) 2024-11-20T22:25:55,149 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ce2232d6efd7447b925595a838c21847 2024-11-20T22:25:55,157 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:55,158 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/e48f675627514d278aaf144e31c77ee9 is 50, key is test_row_0/C:col10/1732141554023/Put/seqid=0 2024-11-20T22:25:55,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/38ba8e7a74f643cfbb18cc087d7d5c87 is 50, key is test_row_0/B:col10/1732141554031/Put/seqid=0 2024-11-20T22:25:55,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
as already flushing 2024-11-20T22:25:55,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:55,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742382_1558 (size=12949) 2024-11-20T22:25:55,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742383_1559 (size=12301) 2024-11-20T22:25:55,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:55,249 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/38ba8e7a74f643cfbb18cc087d7d5c87 2024-11-20T22:25:55,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/0ac1e8a69ae946a58a8e3155e2cd12bc is 50, key is test_row_0/C:col10/1732141554031/Put/seqid=0 2024-11-20T22:25:55,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141615272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141615273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141615274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141615275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742384_1560 (size=12301) 2024-11-20T22:25:55,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141615387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141615387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141615387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141615387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,498 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/f7ab89642fa3400ab82039d142cb84f6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f7ab89642fa3400ab82039d142cb84f6 2024-11-20T22:25:55,506 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into f7ab89642fa3400ab82039d142cb84f6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:55,506 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:55,506 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141555005; duration=0sec 2024-11-20T22:25:55,506 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:55,506 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:55,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141615557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141615595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141615595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141615595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141615597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,655 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/e48f675627514d278aaf144e31c77ee9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e48f675627514d278aaf144e31c77ee9 2024-11-20T22:25:55,694 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into e48f675627514d278aaf144e31c77ee9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
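The repeated RegionTooBusyException entries above show the region server rejecting puts while the memstore of region 62dd6d22774f5784522279eafe291710 is over its 512.0 K blocking limit. Below is a minimal client-side sketch of backing off and retrying such rejected writes; it is illustrative only and not part of this test run. The table name, row key and column family are taken from the log, while RETRY_LIMIT, BASE_BACKOFF_MS and writeWithBackoff are hypothetical names, and depending on client retry settings the exception may surface wrapped in a retries-exhausted error rather than directly as assumed here.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  // Hypothetical retry ceiling and base delay; not taken from the test output above.
  private static final int RETRY_LIMIT = 5;
  private static final long BASE_BACKOFF_MS = 100L;

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key and column family mirror the "test_row_0/A:col10" keys seen in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      writeWithBackoff(table, put);
    }
  }

  // Retries the put when the region reports it is too busy, doubling the delay each attempt.
  static void writeWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long delay = BASE_BACKOFF_MS;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        if (attempt >= RETRY_LIMIT) {
          throw busy; // give up after RETRY_LIMIT attempts
        }
        Thread.sleep(delay);
        delay *= 2;
      }
    }
  }
}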
2024-11-20T22:25:55,694 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:55,694 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141555016; duration=0sec 2024-11-20T22:25:55,694 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:55,694 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:55,705 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/0ac1e8a69ae946a58a8e3155e2cd12bc 2024-11-20T22:25:55,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ce2232d6efd7447b925595a838c21847 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2232d6efd7447b925595a838c21847 2024-11-20T22:25:55,726 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2232d6efd7447b925595a838c21847, entries=150, sequenceid=362, filesize=12.0 K 2024-11-20T22:25:55,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/38ba8e7a74f643cfbb18cc087d7d5c87 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/38ba8e7a74f643cfbb18cc087d7d5c87 2024-11-20T22:25:55,737 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/38ba8e7a74f643cfbb18cc087d7d5c87, entries=150, sequenceid=362, filesize=12.0 K 2024-11-20T22:25:55,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/0ac1e8a69ae946a58a8e3155e2cd12bc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0ac1e8a69ae946a58a8e3155e2cd12bc 2024-11-20T22:25:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:55,753 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0ac1e8a69ae946a58a8e3155e2cd12bc, entries=150, sequenceid=362, filesize=12.0 K 2024-11-20T22:25:55,753 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 62dd6d22774f5784522279eafe291710 in 647ms, sequenceid=362, compaction requested=false 2024-11-20T22:25:55,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:55,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:55,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T22:25:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T22:25:55,773 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T22:25:55,773 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1270 sec 2024-11-20T22:25:55,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.1420 sec 2024-11-20T22:25:55,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T22:25:55,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:55,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:55,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:55,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:55,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:55,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:55,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/8f6a65a527bc4904813c3fd661750a61 is 50, key is test_row_0/A:col10/1732141555273/Put/seqid=0 2024-11-20T22:25:55,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141615917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141615917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141615939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141615942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:55,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742385_1561 (size=12301) 2024-11-20T22:25:55,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/8f6a65a527bc4904813c3fd661750a61 2024-11-20T22:25:55,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/360c2578b7444125b6d5a86499070855 is 50, key is test_row_0/B:col10/1732141555273/Put/seqid=0 2024-11-20T22:25:56,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742386_1562 (size=12301) 2024-11-20T22:25:56,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141616040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141616040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141616053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141616056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141616252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141616253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141616261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141616261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/360c2578b7444125b6d5a86499070855 2024-11-20T22:25:56,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/773d818d360149ae97a66f02dd572bb9 is 50, key is test_row_0/C:col10/1732141555273/Put/seqid=0 2024-11-20T22:25:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742387_1563 (size=12301) 2024-11-20T22:25:56,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/773d818d360149ae97a66f02dd572bb9 2024-11-20T22:25:56,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/8f6a65a527bc4904813c3fd661750a61 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/8f6a65a527bc4904813c3fd661750a61 2024-11-20T22:25:56,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/8f6a65a527bc4904813c3fd661750a61, entries=150, sequenceid=394, filesize=12.0 K 2024-11-20T22:25:56,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/360c2578b7444125b6d5a86499070855 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/360c2578b7444125b6d5a86499070855 2024-11-20T22:25:56,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/360c2578b7444125b6d5a86499070855, entries=150, sequenceid=394, filesize=12.0 K 2024-11-20T22:25:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/773d818d360149ae97a66f02dd572bb9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/773d818d360149ae97a66f02dd572bb9 2024-11-20T22:25:56,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/773d818d360149ae97a66f02dd572bb9, entries=150, sequenceid=394, filesize=12.0 K 2024-11-20T22:25:56,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 62dd6d22774f5784522279eafe291710 in 601ms, sequenceid=394, compaction requested=true 2024-11-20T22:25:56,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:56,513 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:56,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:56,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:56,513 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:56,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:56,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:56,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:56,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:56,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:56,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:56,514 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:56,514 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f7ab89642fa3400ab82039d142cb84f6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2232d6efd7447b925595a838c21847, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/8f6a65a527bc4904813c3fd661750a61] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=36.7 K 2024-11-20T22:25:56,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7ab89642fa3400ab82039d142cb84f6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141553392 2024-11-20T22:25:56,516 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:56,516 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:56,516 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
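As a quick sanity check on the selection above, using only sizes already reported in this log: the 37551 bytes picked by the exploring policy match the reported totalSize of 36.7 K, and the small gap against the rounded per-file sizes is display rounding only.

\[
37551\ \text{bytes} \div 1024 \approx 36.7\ \text{K} \approx 12.6\ \text{K} + 12.0\ \text{K} + 12.0\ \text{K}
\]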
2024-11-20T22:25:56,516 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0211afe23629477abdb3c506cbcbd33e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/38ba8e7a74f643cfbb18cc087d7d5c87, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/360c2578b7444125b6d5a86499070855] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=36.7 K 2024-11-20T22:25:56,518 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0211afe23629477abdb3c506cbcbd33e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141553392 2024-11-20T22:25:56,518 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce2232d6efd7447b925595a838c21847, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732141554028 2024-11-20T22:25:56,519 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 38ba8e7a74f643cfbb18cc087d7d5c87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732141554028 2024-11-20T22:25:56,519 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f6a65a527bc4904813c3fd661750a61, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141555261 2024-11-20T22:25:56,519 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 360c2578b7444125b6d5a86499070855, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141555261 2024-11-20T22:25:56,540 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:56,541 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/368a620c0c7346acbfad3a035c1200e8 is 50, key is test_row_0/B:col10/1732141555273/Put/seqid=0 2024-11-20T22:25:56,552 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:56,553 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ce2bde621dbc4ac6ad17c950e16c2664 is 50, key is test_row_0/A:col10/1732141555273/Put/seqid=0 2024-11-20T22:25:56,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:25:56,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:56,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:56,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:56,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:56,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742388_1564 (size=13051) 2024-11-20T22:25:56,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3c71af9f6abd4fce8b1e247454f168d0 is 50, key is test_row_1/A:col10/1732141556568/Put/seqid=0 2024-11-20T22:25:56,589 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/368a620c0c7346acbfad3a035c1200e8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/368a620c0c7346acbfad3a035c1200e8 2024-11-20T22:25:56,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742389_1565 (size=13051) 2024-11-20T22:25:56,596 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 368a620c0c7346acbfad3a035c1200e8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:56,596 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:56,596 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141556513; duration=0sec 2024-11-20T22:25:56,597 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:56,597 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:56,597 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:56,598 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:56,598 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/ce2bde621dbc4ac6ad17c950e16c2664 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2bde621dbc4ac6ad17c950e16c2664 2024-11-20T22:25:56,598 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:56,598 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
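The minor compactions above are scheduled internally by CompactSplit after each flush. For reference only, here is a hedged sketch, not part of this test, of how the same table could be compacted and monitored through the public Admin API; the class name and polling interval are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table); // request a minor compaction of every store in the table
      // Poll until the region servers report that no compaction is running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}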
2024-11-20T22:25:56,599 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e48f675627514d278aaf144e31c77ee9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0ac1e8a69ae946a58a8e3155e2cd12bc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/773d818d360149ae97a66f02dd572bb9] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=36.7 K 2024-11-20T22:25:56,599 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e48f675627514d278aaf144e31c77ee9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732141553392 2024-11-20T22:25:56,599 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ac1e8a69ae946a58a8e3155e2cd12bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732141554028 2024-11-20T22:25:56,600 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 773d818d360149ae97a66f02dd572bb9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141555261 2024-11-20T22:25:56,603 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into ce2bde621dbc4ac6ad17c950e16c2664(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:56,603 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:56,603 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141556512; duration=0sec 2024-11-20T22:25:56,603 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:56,603 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:56,626 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#480 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:56,626 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/e3b85aa3098948468f92d7abe6184429 is 50, key is test_row_0/C:col10/1732141555273/Put/seqid=0 2024-11-20T22:25:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742390_1566 (size=14737) 2024-11-20T22:25:56,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3c71af9f6abd4fce8b1e247454f168d0 2024-11-20T22:25:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742391_1567 (size=13051) 2024-11-20T22:25:56,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141616639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141616640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141616642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a17d737d7ac44b3694b5c0567511f25d is 50, key is test_row_1/B:col10/1732141556568/Put/seqid=0 2024-11-20T22:25:56,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141616643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,657 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/e3b85aa3098948468f92d7abe6184429 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e3b85aa3098948468f92d7abe6184429 2024-11-20T22:25:56,664 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into e3b85aa3098948468f92d7abe6184429(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
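The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold, which HBase derives as the memstore flush size multiplied by the block multiplier. The minimal sketch below only illustrates that relationship: the 128 KB flush size and multiplier of 4 are assumptions chosen to match the 512 K limit in the log (the test's actual configuration is not visible in this excerpt), and the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a small flush size times the default multiplier of 4
    // reproduces the 512 K blocking limit reported in the warnings above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes"); // 524288
  }
}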
2024-11-20T22:25:56,664 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:56,665 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141556513; duration=0sec 2024-11-20T22:25:56,665 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:56,665 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:56,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742392_1568 (size=9857) 2024-11-20T22:25:56,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a17d737d7ac44b3694b5c0567511f25d 2024-11-20T22:25:56,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/808067fa9bd64c679d38a55d5d26bbd7 is 50, key is test_row_1/C:col10/1732141556568/Put/seqid=0 2024-11-20T22:25:56,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742393_1569 (size=9857) 2024-11-20T22:25:56,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/808067fa9bd64c679d38a55d5d26bbd7 2024-11-20T22:25:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/3c71af9f6abd4fce8b1e247454f168d0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3c71af9f6abd4fce8b1e247454f168d0 2024-11-20T22:25:56,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T22:25:56,751 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T22:25:56,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3c71af9f6abd4fce8b1e247454f168d0, entries=200, sequenceid=405, filesize=14.4 K 2024-11-20T22:25:56,753 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] 
master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:25:56,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/a17d737d7ac44b3694b5c0567511f25d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a17d737d7ac44b3694b5c0567511f25d 2024-11-20T22:25:56,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-20T22:25:56,754 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:25:56,754 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:25:56,755 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:25:56,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:56,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a17d737d7ac44b3694b5c0567511f25d, entries=100, sequenceid=405, filesize=9.6 K 2024-11-20T22:25:56,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/808067fa9bd64c679d38a55d5d26bbd7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/808067fa9bd64c679d38a55d5d26bbd7 2024-11-20T22:25:56,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/808067fa9bd64c679d38a55d5d26bbd7, entries=100, sequenceid=405, filesize=9.6 K 2024-11-20T22:25:56,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141616752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 62dd6d22774f5784522279eafe291710 in 196ms, sequenceid=405, compaction requested=false 2024-11-20T22:25:56,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:56,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:25:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 62dd6d22774f5784522279eafe291710 2024-11-20T22:25:56,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:56,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:56,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:56,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:56,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/e4cc30c54bfc4beaaf4445ecf8ffeae2 is 50, key is test_row_0/A:col10/1732141556627/Put/seqid=0 2024-11-20T22:25:56,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141616777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141616784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141616789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742394_1570 (size=17181) 2024-11-20T22:25:56,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:56,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141616890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141616891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141616897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:56,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:56,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:56,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:56,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:56,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:56,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
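The FLUSH operation above (procId 140 completed, then pid=142/143 stored) and the "Unable to complete flush ... as already flushing" failures are driven by a table flush requested through the master. Below is a minimal sketch of how such a flush is issued through the public Admin API, assuming a standard client connection; whether the test triggers it exactly this way is not shown in this excerpt, and the class name is hypothetical. The call blocks until the procedure finishes, which is consistent with the master repeatedly logging "Checking to see if procedure is done pid=142" while the region server reports that the region is already flushing.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush of every region of the table; the master runs it as a
      // FlushTableProcedure like pid=142 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}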
2024-11-20T22:25:56,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:56,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:56,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141616966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,058 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:57,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:57,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141617097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141617098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141617109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,212 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:57,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:57,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/e4cc30c54bfc4beaaf4445ecf8ffeae2 2024-11-20T22:25:57,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/48c146d3c6374fa0b69b522060729771 is 50, key is test_row_0/B:col10/1732141556627/Put/seqid=0 2024-11-20T22:25:57,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141617274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742395_1571 (size=12301) 2024-11-20T22:25:57,352 DEBUG [Thread-2135 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51c10dfb to 127.0.0.1:51916 2024-11-20T22:25:57,352 DEBUG [Thread-2135 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:57,352 DEBUG [Thread-2133 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ab073d to 127.0.0.1:51916 2024-11-20T22:25:57,352 DEBUG [Thread-2133 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:57,356 DEBUG [Thread-2141 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ee1e5bc to 127.0.0.1:51916 2024-11-20T22:25:57,356 DEBUG [Thread-2141 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:57,358 DEBUG [Thread-2139 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a895ecf to 127.0.0.1:51916 2024-11-20T22:25:57,358 DEBUG [Thread-2139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:57,359 DEBUG [Thread-2137 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x039117ee to 127.0.0.1:51916 2024-11-20T22:25:57,359 DEBUG [Thread-2137 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:57,369 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:57,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
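The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking size, which is the configured flush size multiplied by the block multiplier. A minimal sketch of that arithmetic follows, using the real HBase property names; the 128 KB flush size is an assumed test-only value chosen to match the 512 K limit in this log (the stock default is 128 MB), and the multiplier of 4 is the usual default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: how a 512 K blocking limit like the one in this log can be derived from
// configuration. The property names are the real HBase keys; the 128 KB flush size
// is an assumed test-only value (default 128 MB), picked so that 128 KB * 4 = 512 KB.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = flushSize * multiplier; // 512 KB for the values above

    // Once the per-region memstore exceeds blockingSize, puts are rejected with
    // RegionTooBusyException until the in-progress flush brings the size back down.
    System.out.println("blocking memstore size = " + blockingSize + " bytes");
  }
}

The flush entries later in the log ("Flushed memstore data size=55.91 KB at sequenceid=436 ...") record the flush that eventually clears this condition.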
2024-11-20T22:25:57,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141617404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141617407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141617413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33040 deadline: 1732141617569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,570 DEBUG [Thread-2126 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., hostname=6365a1e51efd,46811,1732141422048, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:25:57,675 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
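The "Call exception, tries=6, retries=16, started=4163 ms ago" entry above is the client side of the same condition: the AcidGuaranteesTestTool writer calls HTable.put, and RpcRetryingCallerImpl keeps re-issuing the call while the server answers with RegionTooBusyException. A hedged sketch of such a retried put follows; the table, row, and column names mirror the test, the retry count matches "retries=16" in the log, and the pause value is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the write path behind the retry log above: a single put that the
// client-side retrying caller keeps re-issuing while the region is too busy.
public class RetriedPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" in the log
    conf.setLong("hbase.client.pause", 100L);       // assumed base backoff, in milliseconds

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks through the configured retries; if the region stays over its memstore
      // limit for the whole retry window, the call fails with a retries-exhausted error.
      table.put(put);
    }
  }
}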
2024-11-20T22:25:57,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/48c146d3c6374fa0b69b522060729771 2024-11-20T22:25:57,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/1e13c03229e2479591028c840b7b1be5 is 50, key is test_row_0/C:col10/1732141556627/Put/seqid=0 2024-11-20T22:25:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742396_1572 (size=12301) 2024-11-20T22:25:57,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732141617784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,827 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:57,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:57,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732141617906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33066 deadline: 1732141617912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:25:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33080 deadline: 1732141617918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,979 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:57,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:57,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:57,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:57,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:57,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:57,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:58,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/1e13c03229e2479591028c840b7b1be5 2024-11-20T22:25:58,132 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:58,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:58,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:58,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. as already flushing 2024-11-20T22:25:58,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:58,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:25:58,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:25:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/e4cc30c54bfc4beaaf4445ecf8ffeae2 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/e4cc30c54bfc4beaaf4445ecf8ffeae2 2024-11-20T22:25:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
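The repeated "Unable to complete flush ... as already flushing" stack traces above show the master's FlushRegionProcedure (pid=143) being dispatched while the MemStoreFlusher's own flush of region 62dd6d22774f5784522279eafe291710 is still in flight: the region-server callable fails fast and the master records the remote failure. A minimal sketch of that fail-fast shape is below; `RegionFlushState` is a hypothetical class used only for illustration, not an HBase type.

```java
// Sketch of the fail-fast behaviour visible in the log: a flush request that arrives while
// another flush of the same region is running is rejected with an IOException, and the
// caller (here, the master's flush procedure) retries later. Hypothetical class, not HBase code.
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

class RegionFlushState {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    void requestFlush() throws IOException {
        // Mirrors "NOT flushing ... as already flushing" followed by
        // "java.io.IOException: Unable to complete flush".
        if (!flushing.compareAndSet(false, true)) {
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            // ... snapshot the memstore, write .tmp store files, then commit them ...
        } finally {
            flushing.set(false);
        }
    }
}
```

The master treats the IOException as retriable and re-dispatches pid=143, which is why the same stack trace recurs until the MemStoreFlusher's flush finishes and the log later reports "Remote procedure done, pid=143".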
2024-11-20T22:25:58,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/e4cc30c54bfc4beaaf4445ecf8ffeae2, entries=250, sequenceid=436, filesize=16.8 K 2024-11-20T22:25:58,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/48c146d3c6374fa0b69b522060729771 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/48c146d3c6374fa0b69b522060729771 2024-11-20T22:25:58,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/48c146d3c6374fa0b69b522060729771, entries=150, sequenceid=436, filesize=12.0 K 2024-11-20T22:25:58,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/1e13c03229e2479591028c840b7b1be5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/1e13c03229e2479591028c840b7b1be5 2024-11-20T22:25:58,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/1e13c03229e2479591028c840b7b1be5, entries=150, sequenceid=436, filesize=12.0 K 2024-11-20T22:25:58,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 62dd6d22774f5784522279eafe291710 in 1393ms, sequenceid=436, compaction requested=true 2024-11-20T22:25:58,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:58,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:25:58,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:58,160 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:58,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:25:58,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:58,160 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:58,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62dd6d22774f5784522279eafe291710:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:25:58,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:58,160 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44969 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:58,161 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/A is initiating minor compaction (all files) 2024-11-20T22:25:58,161 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/A in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:58,161 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2bde621dbc4ac6ad17c950e16c2664, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3c71af9f6abd4fce8b1e247454f168d0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/e4cc30c54bfc4beaaf4445ecf8ffeae2] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=43.9 K 2024-11-20T22:25:58,161 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:58,161 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/B is initiating minor compaction (all files) 2024-11-20T22:25:58,161 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/B in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
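The "Exploring compaction algorithm has selected 3 files of size 44969 ... with 1 in ratio" entries above come from ExploringCompactionPolicy scoring windows of eligible store files and keeping only windows whose files pass a size-ratio test. The sketch below illustrates that kind of test; the 1.2 ratio and the exact per-file byte counts are assumptions (the log gives only the 44969-byte total and the 12.7 K / 14.4 K / 16.8 K sizes), so this is not the literal HBase code path.

```java
// Hedged sketch of an "in ratio" window check like the one reported above.
import java.util.List;

final class CompactionWindowCheck {
    // A window passes when no file is larger than ratio * (sum of the other files).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate A-store window from the log (12.7 K + 14.4 K + 16.8 K ~= 44969 bytes).
        System.out.println(filesInRatio(List.of(13023L, 14745L, 17201L), 1.2)); // true
    }
}
```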
2024-11-20T22:25:58,161 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/368a620c0c7346acbfad3a035c1200e8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a17d737d7ac44b3694b5c0567511f25d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/48c146d3c6374fa0b69b522060729771] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=34.4 K 2024-11-20T22:25:58,161 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce2bde621dbc4ac6ad17c950e16c2664, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141555261 2024-11-20T22:25:58,161 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 368a620c0c7346acbfad3a035c1200e8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141555261 2024-11-20T22:25:58,163 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a17d737d7ac44b3694b5c0567511f25d, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732141556566 2024-11-20T22:25:58,163 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c71af9f6abd4fce8b1e247454f168d0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732141555914 2024-11-20T22:25:58,163 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 48c146d3c6374fa0b69b522060729771, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732141556627 2024-11-20T22:25:58,163 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4cc30c54bfc4beaaf4445ecf8ffeae2, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732141556627 2024-11-20T22:25:58,174 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#B#compaction#486 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:58,175 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/4005192abf9d4e4ca138d1b07f2d8c1d is 50, key is test_row_0/B:col10/1732141556627/Put/seqid=0 2024-11-20T22:25:58,198 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#A#compaction#487 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:58,204 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/1b84380bf20040e1b2a34a280a28139d is 50, key is test_row_0/A:col10/1732141556627/Put/seqid=0 2024-11-20T22:25:58,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742397_1573 (size=13153) 2024-11-20T22:25:58,230 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/4005192abf9d4e4ca138d1b07f2d8c1d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4005192abf9d4e4ca138d1b07f2d8c1d 2024-11-20T22:25:58,241 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/B of 62dd6d22774f5784522279eafe291710 into 4005192abf9d4e4ca138d1b07f2d8c1d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:58,241 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:58,241 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/B, priority=13, startTime=1732141558160; duration=0sec 2024-11-20T22:25:58,241 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:25:58,242 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:B 2024-11-20T22:25:58,242 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:25:58,246 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:25:58,246 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 62dd6d22774f5784522279eafe291710/C is initiating minor compaction (all files) 2024-11-20T22:25:58,246 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62dd6d22774f5784522279eafe291710/C in TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
2024-11-20T22:25:58,247 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e3b85aa3098948468f92d7abe6184429, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/808067fa9bd64c679d38a55d5d26bbd7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/1e13c03229e2479591028c840b7b1be5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp, totalSize=34.4 K 2024-11-20T22:25:58,248 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e3b85aa3098948468f92d7abe6184429, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732141555261 2024-11-20T22:25:58,248 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 808067fa9bd64c679d38a55d5d26bbd7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732141556566 2024-11-20T22:25:58,250 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e13c03229e2479591028c840b7b1be5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732141556627 2024-11-20T22:25:58,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742398_1574 (size=13153) 2024-11-20T22:25:58,267 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/1b84380bf20040e1b2a34a280a28139d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/1b84380bf20040e1b2a34a280a28139d 2024-11-20T22:25:58,272 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62dd6d22774f5784522279eafe291710#C#compaction#488 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:25:58,273 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/9016227519c9421393f910504797db48 is 50, key is test_row_0/C:col10/1732141556627/Put/seqid=0 2024-11-20T22:25:58,275 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/A of 62dd6d22774f5784522279eafe291710 into 1b84380bf20040e1b2a34a280a28139d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:25:58,275 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:58,275 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/A, priority=13, startTime=1732141558159; duration=0sec 2024-11-20T22:25:58,275 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:58,275 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:A 2024-11-20T22:25:58,285 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:25:58,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T22:25:58,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:58,287 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:25:58,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:25:58,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:25:58,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:25:58,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:25:58,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742399_1575 (size=13153) 2024-11-20T22:25:58,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/0ad2da5dc1e746e5987aba5d24dd1c72 is 50, key is test_row_1/A:col10/1732141556774/Put/seqid=0 2024-11-20T22:25:58,316 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/9016227519c9421393f910504797db48 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9016227519c9421393f910504797db48 2024-11-20T22:25:58,325 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62dd6d22774f5784522279eafe291710/C of 62dd6d22774f5784522279eafe291710 into 9016227519c9421393f910504797db48(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:25:58,325 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:58,325 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710., storeName=62dd6d22774f5784522279eafe291710/C, priority=13, startTime=1732141558160; duration=0sec 2024-11-20T22:25:58,325 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:25:58,325 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62dd6d22774f5784522279eafe291710:C 2024-11-20T22:25:58,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742400_1576 (size=9857) 2024-11-20T22:25:58,341 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/0ad2da5dc1e746e5987aba5d24dd1c72 2024-11-20T22:25:58,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6e9bd47466a04127a29e8f4c7ea91f18 is 50, key is test_row_1/B:col10/1732141556774/Put/seqid=0 2024-11-20T22:25:58,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742401_1577 (size=9857) 2024-11-20T22:25:58,371 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=446 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6e9bd47466a04127a29e8f4c7ea91f18 2024-11-20T22:25:58,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/3ff89da94f564e5f8bff007edf61db22 is 50, key is test_row_1/C:col10/1732141556774/Put/seqid=0 2024-11-20T22:25:58,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742402_1578 (size=9857) 2024-11-20T22:25:58,409 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/3ff89da94f564e5f8bff007edf61db22 2024-11-20T22:25:58,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/0ad2da5dc1e746e5987aba5d24dd1c72 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0ad2da5dc1e746e5987aba5d24dd1c72 2024-11-20T22:25:58,418 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0ad2da5dc1e746e5987aba5d24dd1c72, entries=100, sequenceid=446, filesize=9.6 K 2024-11-20T22:25:58,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/6e9bd47466a04127a29e8f4c7ea91f18 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6e9bd47466a04127a29e8f4c7ea91f18 2024-11-20T22:25:58,422 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6e9bd47466a04127a29e8f4c7ea91f18, entries=100, sequenceid=446, filesize=9.6 K 2024-11-20T22:25:58,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/3ff89da94f564e5f8bff007edf61db22 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/3ff89da94f564e5f8bff007edf61db22 2024-11-20T22:25:58,428 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/3ff89da94f564e5f8bff007edf61db22, entries=100, sequenceid=446, filesize=9.6 K 2024-11-20T22:25:58,428 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 62dd6d22774f5784522279eafe291710 in 141ms, sequenceid=446, compaction requested=false 2024-11-20T22:25:58,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:25:58,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:25:58,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-20T22:25:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-20T22:25:58,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T22:25:58,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6740 sec 2024-11-20T22:25:58,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.6770 sec 2024-11-20T22:25:58,789 DEBUG [Thread-2124 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3338bc39 to 127.0.0.1:51916 2024-11-20T22:25:58,790 DEBUG [Thread-2124 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T22:25:58,872 INFO [Thread-2132 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T22:25:58,923 DEBUG [Thread-2130 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167e1f2a to 127.0.0.1:51916 2024-11-20T22:25:58,923 DEBUG [Thread-2130 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:58,926 DEBUG [Thread-2122 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f31bf28 to 127.0.0.1:51916 2024-11-20T22:25:58,926 DEBUG [Thread-2122 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:25:58,934 DEBUG [Thread-2128 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e6756a1 to 127.0.0.1:51916 2024-11-20T22:25:58,934 DEBUG [Thread-2128 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
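The AcidGuaranteesTestTool summary just below reports, per scanner, how many scans ran and how many rows were verified. The property being verified is cross-family consistency: writers update families A, B and C of a row together, so a reader must never observe a row where the families disagree, even while the flushes and compactions above are running. The following is a rough, hypothetical per-row check written against the public client API; it is not the tool's actual implementation, and it assumes all three families are present in each Result.

```java
// Illustrative per-row consistency check in the spirit of the "scanned N / verified M rows"
// summary below; NOT AcidGuaranteesTestTool's real code.
import java.util.Arrays;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

final class RowConsistencyCheck {
    private static final byte[][] FAMILIES = {
        Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")
    };

    /** A row passes if families A, B and C hold identical qualifier/value sets,
     *  i.e. a writer's multi-family mutation was observed atomically. */
    static boolean isConsistent(Result row) {
        NavigableMap<byte[], byte[]> reference = row.getFamilyMap(FAMILIES[0]);
        for (int i = 1; i < FAMILIES.length; i++) {
            NavigableMap<byte[], byte[]> other = row.getFamilyMap(FAMILIES[i]);
            if (reference.size() != other.size()) {
                return false;
            }
            for (byte[] qualifier : reference.keySet()) {
                if (!Arrays.equals(reference.get(qualifier), other.get(qualifier))) {
                    return false;
                }
            }
        }
        return true;
    }
}
```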
2024-11-20T22:26:01,609 DEBUG [Thread-2126 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0cc902d0 to 127.0.0.1:51916
2024-11-20T22:26:01,609 DEBUG [Thread-2126 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1778
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5334 rows
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1768
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5304 rows
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1784
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5352 rows
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1786
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5358 rows
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1774
2024-11-20T22:26:01,609 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5321 rows
2024-11-20T22:26:01,609 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T22:26:01,610 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67f4ca2c to 127.0.0.1:51916
2024-11-20T22:26:01,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T22:26:01,614 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T22:26:01,617 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T22:26:01,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T22:26:01,628 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141561628"}]},"ts":"1732141561628"}
2024-11-20T22:26:01,635 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T22:26:01,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-20T22:26:01,665 INFO [PEWorker-3 {}]
procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T22:26:01,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:26:01,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, UNASSIGN}] 2024-11-20T22:26:01,670 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, UNASSIGN 2024-11-20T22:26:01,671 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=62dd6d22774f5784522279eafe291710, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:01,675 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:26:01,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:26:01,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:26:01,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:01,828 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 62dd6d22774f5784522279eafe291710 2024-11-20T22:26:01,828 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:26:01,828 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 62dd6d22774f5784522279eafe291710, disabling compactions & flushes 2024-11-20T22:26:01,828 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:26:01,828 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:26:01,828 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. after waiting 0 ms 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 
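The "Client=jenkins//172.17.0.2 disable TestAcidGuarantees" request above is driven from the HBase Admin API; the procedure chain it spawns (DisableTableProcedure pid=144 → CloseTableRegionsProcedure pid=145 → TransitRegionStateProcedure pid=146 → CloseRegionProcedure pid=147) is what the subsequent entries execute. A minimal client-side sketch using the public API is shown here; the configuration source is an assumption, since the test wires its own cluster connection.

```java
// Minimal sketch of the client call behind the disable request logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks until the master reports the disable procedure done,
            // which is what the repeated "Checking to see if procedure is done pid=144" polls reflect.
            admin.disableTable(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```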
2024-11-20T22:26:01,829 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(2837): Flushing 62dd6d22774f5784522279eafe291710 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=A 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=B 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 62dd6d22774f5784522279eafe291710, store=C 2024-11-20T22:26:01,829 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:01,842 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/039e2145f6724c4c9f883222d7fe7819 is 50, key is test_row_0/A:col10/1732141561607/Put/seqid=0 2024-11-20T22:26:01,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742403_1579 (size=12301) 2024-11-20T22:26:01,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:26:02,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:26:02,293 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/039e2145f6724c4c9f883222d7fe7819 2024-11-20T22:26:02,312 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/9ed2e8f1fa7b4ef782979aecc4ae1d27 is 50, key is test_row_0/B:col10/1732141561607/Put/seqid=0 2024-11-20T22:26:02,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742404_1580 (size=12301) 2024-11-20T22:26:02,754 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:26:02,754 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/9ed2e8f1fa7b4ef782979aecc4ae1d27 2024-11-20T22:26:02,772 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/54ae9b9393de42ec95d13847cd0e92b4 is 50, key is test_row_0/C:col10/1732141561607/Put/seqid=0 2024-11-20T22:26:02,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742405_1581 (size=12301) 2024-11-20T22:26:02,789 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/54ae9b9393de42ec95d13847cd0e92b4 2024-11-20T22:26:02,854 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/A/039e2145f6724c4c9f883222d7fe7819 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/039e2145f6724c4c9f883222d7fe7819 2024-11-20T22:26:02,874 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/039e2145f6724c4c9f883222d7fe7819, entries=150, sequenceid=455, filesize=12.0 K 2024-11-20T22:26:02,880 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/B/9ed2e8f1fa7b4ef782979aecc4ae1d27 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9ed2e8f1fa7b4ef782979aecc4ae1d27 2024-11-20T22:26:02,886 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9ed2e8f1fa7b4ef782979aecc4ae1d27, entries=150, sequenceid=455, filesize=12.0 K 2024-11-20T22:26:02,888 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/.tmp/C/54ae9b9393de42ec95d13847cd0e92b4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/54ae9b9393de42ec95d13847cd0e92b4 2024-11-20T22:26:02,895 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/54ae9b9393de42ec95d13847cd0e92b4, entries=150, sequenceid=455, filesize=12.0 K 2024-11-20T22:26:02,903 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 62dd6d22774f5784522279eafe291710 in 1073ms, sequenceid=455, compaction requested=true 2024-11-20T22:26:02,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/5705ffdcbbb9439892531a7ef3e0ac56, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ff5b1e2bf01f4a5a8bde311dcf99cee9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/13e4fb36b2ad49248111c1f9dffd844a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/83da1e3c373648caad7c769560c04224, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/99c61628aff34e9a8bbcdaa020e64dc3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3fb55e98ef5143fa8d7a53e44f6c6361, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/7e117f6938b44296a2d336a7bb39cfb9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/a5e25116cc504e98ac2316ada000c7d9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c1038d373fc749d483872ea0342a413b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f02a7d48df5d4b999a2142782a24feae, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/51fccc25554a4d6a969cd8764d78a9bd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/40bcb49aaa594b9b8ad70e31ca0ed04b, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/4e783a4660d54de299b974a98f2c8ed6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3ba7e998c60e43e7b46a322f9172ca5e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/28e278b7cf394dc08424d7f3184ed7b4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c058cc32d7a049c5aed37f46783d8718, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/9f676883717e4c7a8e10a924c394d03c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c69e44bba74f4d198e6bfa78a09bef0a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/b8f84af128a84613adb9badb8ab809d9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f7ab89642fa3400ab82039d142cb84f6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2232d6efd7447b925595a838c21847, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2bde621dbc4ac6ad17c950e16c2664, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/8f6a65a527bc4904813c3fd661750a61, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3c71af9f6abd4fce8b1e247454f168d0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/e4cc30c54bfc4beaaf4445ecf8ffeae2] to archive 2024-11-20T22:26:02,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
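The HFileArchiver entries that follow show each obsolete store file being moved, not deleted: its path under `data/` is mirrored under `archive/` relative to the same HBase root directory. The sketch below reconstructs that path mapping for illustration only; the helper name is hypothetical, and the real logic lives in org.apache.hadoop.hbase.backup.HFileArchiver.

```java
// Illustrative reconstruction of the archive path mapping visible in the entries below:
// <root>/data/<ns>/<table>/<region>/<cf>/<file> -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>
import org.apache.hadoop.fs.Path;

final class ArchivePathSketch {
    static Path toArchivePath(Path rootDir, Path storeFile) {
        // Relativize the store file against the root dir and re-root it under "archive".
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72");
        Path file = new Path(root,
            "data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/5705ffdcbbb9439892531a7ef3e0ac56");
        // Prints the .../archive/data/default/TestAcidGuarantees/.../A/... location seen in the log.
        System.out.println(toArchivePath(root, file));
    }
}
```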
2024-11-20T22:26:02,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/5705ffdcbbb9439892531a7ef3e0ac56 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/5705ffdcbbb9439892531a7ef3e0ac56 2024-11-20T22:26:02,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ff5b1e2bf01f4a5a8bde311dcf99cee9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ff5b1e2bf01f4a5a8bde311dcf99cee9 2024-11-20T22:26:02,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/13e4fb36b2ad49248111c1f9dffd844a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/13e4fb36b2ad49248111c1f9dffd844a 2024-11-20T22:26:02,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/83da1e3c373648caad7c769560c04224 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/83da1e3c373648caad7c769560c04224 2024-11-20T22:26:02,971 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/99c61628aff34e9a8bbcdaa020e64dc3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/99c61628aff34e9a8bbcdaa020e64dc3 2024-11-20T22:26:02,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3fb55e98ef5143fa8d7a53e44f6c6361 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3fb55e98ef5143fa8d7a53e44f6c6361 2024-11-20T22:26:02,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/7e117f6938b44296a2d336a7bb39cfb9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/7e117f6938b44296a2d336a7bb39cfb9 2024-11-20T22:26:03,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/a5e25116cc504e98ac2316ada000c7d9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/a5e25116cc504e98ac2316ada000c7d9 2024-11-20T22:26:03,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c1038d373fc749d483872ea0342a413b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c1038d373fc749d483872ea0342a413b 2024-11-20T22:26:03,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f02a7d48df5d4b999a2142782a24feae to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f02a7d48df5d4b999a2142782a24feae 2024-11-20T22:26:03,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/51fccc25554a4d6a969cd8764d78a9bd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/51fccc25554a4d6a969cd8764d78a9bd 2024-11-20T22:26:03,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/40bcb49aaa594b9b8ad70e31ca0ed04b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/40bcb49aaa594b9b8ad70e31ca0ed04b 2024-11-20T22:26:03,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/4e783a4660d54de299b974a98f2c8ed6 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/4e783a4660d54de299b974a98f2c8ed6 2024-11-20T22:26:03,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3ba7e998c60e43e7b46a322f9172ca5e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3ba7e998c60e43e7b46a322f9172ca5e 2024-11-20T22:26:03,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/28e278b7cf394dc08424d7f3184ed7b4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/28e278b7cf394dc08424d7f3184ed7b4 2024-11-20T22:26:03,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c058cc32d7a049c5aed37f46783d8718 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c058cc32d7a049c5aed37f46783d8718 2024-11-20T22:26:03,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/9f676883717e4c7a8e10a924c394d03c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/9f676883717e4c7a8e10a924c394d03c 2024-11-20T22:26:03,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c69e44bba74f4d198e6bfa78a09bef0a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/c69e44bba74f4d198e6bfa78a09bef0a 2024-11-20T22:26:03,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/b8f84af128a84613adb9badb8ab809d9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/b8f84af128a84613adb9badb8ab809d9 2024-11-20T22:26:03,085 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f7ab89642fa3400ab82039d142cb84f6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/f7ab89642fa3400ab82039d142cb84f6 2024-11-20T22:26:03,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2232d6efd7447b925595a838c21847 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2232d6efd7447b925595a838c21847 2024-11-20T22:26:03,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2bde621dbc4ac6ad17c950e16c2664 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/ce2bde621dbc4ac6ad17c950e16c2664 2024-11-20T22:26:03,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/8f6a65a527bc4904813c3fd661750a61 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/8f6a65a527bc4904813c3fd661750a61 2024-11-20T22:26:03,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3c71af9f6abd4fce8b1e247454f168d0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/3c71af9f6abd4fce8b1e247454f168d0 2024-11-20T22:26:03,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/e4cc30c54bfc4beaaf4445ecf8ffeae2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/e4cc30c54bfc4beaaf4445ecf8ffeae2 2024-11-20T22:26:03,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0316cb89ac24407fb325fbc70eb42331, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/047f4bb8e0c9415882e66830ab2a3351, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/43d957b278494428a466526bb2dd14d2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/22a793a44a9a461e948dd1ffc4480627, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/208b217dc54845d3aa1f33354cf3a41f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0a39da65585441069c6010402b12c78b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9394f1b4febb4ab18c9bfdfa9457547f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6096e5ef1c8a45b9a2675014734037c6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6b5391098b174ffdac98243a0af29bba, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/5e339d2ee5904e2fadbfa6c32c4b4dac, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6237b06610054348bc1e56f2067f5721, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/7bb95efd03d84957a707789c6ac51ccd, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4479633cf2fb4dfba4f71e7feff74fd5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fa6a61401814e4d98b9816104edb5dc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a39f123da83241d5a9659c7095138f75, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fb53c712850841e9910b21bb1fd4c679, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/59a290b7bde344188ce8b0f910b7d81b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fd3074762e544d89ac35eeab6329ba3c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0211afe23629477abdb3c506cbcbd33e, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a1cfa73174d04e5cb9c578b8dac3de7c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/38ba8e7a74f643cfbb18cc087d7d5c87, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/368a620c0c7346acbfad3a035c1200e8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/360c2578b7444125b6d5a86499070855, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a17d737d7ac44b3694b5c0567511f25d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/48c146d3c6374fa0b69b522060729771] to archive 2024-11-20T22:26:03,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T22:26:03,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0316cb89ac24407fb325fbc70eb42331 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0316cb89ac24407fb325fbc70eb42331 2024-11-20T22:26:03,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/047f4bb8e0c9415882e66830ab2a3351 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/047f4bb8e0c9415882e66830ab2a3351 2024-11-20T22:26:03,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/43d957b278494428a466526bb2dd14d2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/43d957b278494428a466526bb2dd14d2 2024-11-20T22:26:03,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/22a793a44a9a461e948dd1ffc4480627 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/22a793a44a9a461e948dd1ffc4480627 2024-11-20T22:26:03,105 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/208b217dc54845d3aa1f33354cf3a41f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/208b217dc54845d3aa1f33354cf3a41f 2024-11-20T22:26:03,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0a39da65585441069c6010402b12c78b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0a39da65585441069c6010402b12c78b 2024-11-20T22:26:03,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9394f1b4febb4ab18c9bfdfa9457547f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9394f1b4febb4ab18c9bfdfa9457547f 2024-11-20T22:26:03,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6096e5ef1c8a45b9a2675014734037c6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6096e5ef1c8a45b9a2675014734037c6 2024-11-20T22:26:03,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6b5391098b174ffdac98243a0af29bba to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6b5391098b174ffdac98243a0af29bba 2024-11-20T22:26:03,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/5e339d2ee5904e2fadbfa6c32c4b4dac to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/5e339d2ee5904e2fadbfa6c32c4b4dac 2024-11-20T22:26:03,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6237b06610054348bc1e56f2067f5721 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6237b06610054348bc1e56f2067f5721 2024-11-20T22:26:03,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/7bb95efd03d84957a707789c6ac51ccd to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/7bb95efd03d84957a707789c6ac51ccd 2024-11-20T22:26:03,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4479633cf2fb4dfba4f71e7feff74fd5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4479633cf2fb4dfba4f71e7feff74fd5 2024-11-20T22:26:03,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fa6a61401814e4d98b9816104edb5dc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/2fa6a61401814e4d98b9816104edb5dc 2024-11-20T22:26:03,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a39f123da83241d5a9659c7095138f75 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a39f123da83241d5a9659c7095138f75 2024-11-20T22:26:03,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fb53c712850841e9910b21bb1fd4c679 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fb53c712850841e9910b21bb1fd4c679 2024-11-20T22:26:03,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/59a290b7bde344188ce8b0f910b7d81b to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/59a290b7bde344188ce8b0f910b7d81b 2024-11-20T22:26:03,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fd3074762e544d89ac35eeab6329ba3c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/fd3074762e544d89ac35eeab6329ba3c 2024-11-20T22:26:03,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0211afe23629477abdb3c506cbcbd33e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/0211afe23629477abdb3c506cbcbd33e 2024-11-20T22:26:03,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a1cfa73174d04e5cb9c578b8dac3de7c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a1cfa73174d04e5cb9c578b8dac3de7c 2024-11-20T22:26:03,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/38ba8e7a74f643cfbb18cc087d7d5c87 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/38ba8e7a74f643cfbb18cc087d7d5c87 2024-11-20T22:26:03,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/368a620c0c7346acbfad3a035c1200e8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/368a620c0c7346acbfad3a035c1200e8 2024-11-20T22:26:03,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/360c2578b7444125b6d5a86499070855 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/360c2578b7444125b6d5a86499070855 2024-11-20T22:26:03,177 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a17d737d7ac44b3694b5c0567511f25d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/a17d737d7ac44b3694b5c0567511f25d 2024-11-20T22:26:03,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/48c146d3c6374fa0b69b522060729771 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/48c146d3c6374fa0b69b522060729771 2024-11-20T22:26:03,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/631372ddd34b4ccf910c321aac95c3d9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/a445ea0a7e1d4de59dc1f88a1d2e4244, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ea776bf7842a4d13b193f7d9998c7c93, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/df3f9ef147214f3ba5810eaec1faa2a8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/caec78dfda934d00b6aa9db96ded73a8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8a79fa88e7a947e8ac9326bb361c9b23, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5485f584570e404e89525d3243a397e1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/35a27dbf25b74fff8fdec5b919157735, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9465ea275b94404fa16672b9a86376f3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/03506d0415774565bb08a387a21aa030, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/81f7aa0c149b433f8085fa44703567df, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/70533e668d77489d9d36e0c01b003a88, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0b8015a4a1a7490181930cc7814bdbd7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/fa7b3c5045664841bc3e5503ba5fc28e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5d99220ec28e48aaac65c08f7668d233, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/b2eb7f4ceb9340ae989c3293aba2add0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/c720defc07ac44c294eaa1ef7f6d2f6f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/777f8e6dcec04664a6d2dd72475ec52e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e48f675627514d278aaf144e31c77ee9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/7145c8902b944ab39777d7826cc84aa4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0ac1e8a69ae946a58a8e3155e2cd12bc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e3b85aa3098948468f92d7abe6184429, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/773d818d360149ae97a66f02dd572bb9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/808067fa9bd64c679d38a55d5d26bbd7, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/1e13c03229e2479591028c840b7b1be5] to archive 2024-11-20T22:26:03,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
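The archiver entries above show each compacted store file being moved from data/default/TestAcidGuarantees/<region>/<family> to the mirrored path under archive/. A minimal sketch of how the archived files for one family could be listed back with the plain Hadoop FileSystem API; the class name ListArchivedFiles and the fs.defaultFS value pointing at the localhost:46027 NameNode seen in this run are assumptions for illustration, not part of the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedFiles {
        // Prints every HFile found under one archived column-family directory, e.g.
        //   .../archive/data/default/TestAcidGuarantees/<region>/A
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Assumption: same NameNode as in the log above.
            conf.set("fs.defaultFS", "hdfs://localhost:46027");
            Path familyDir = new Path(args[0]); // archive path for one column family
            try (FileSystem fs = FileSystem.get(conf)) {
                for (FileStatus st : fs.listStatus(familyDir)) {
                    System.out.println(st.getPath().getName() + " " + st.getLen() + " bytes");
                }
            }
        }
    }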
2024-11-20T22:26:03,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/631372ddd34b4ccf910c321aac95c3d9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/631372ddd34b4ccf910c321aac95c3d9 2024-11-20T22:26:03,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/a445ea0a7e1d4de59dc1f88a1d2e4244 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/a445ea0a7e1d4de59dc1f88a1d2e4244 2024-11-20T22:26:03,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ea776bf7842a4d13b193f7d9998c7c93 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/ea776bf7842a4d13b193f7d9998c7c93 2024-11-20T22:26:03,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/df3f9ef147214f3ba5810eaec1faa2a8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/df3f9ef147214f3ba5810eaec1faa2a8 2024-11-20T22:26:03,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/caec78dfda934d00b6aa9db96ded73a8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/caec78dfda934d00b6aa9db96ded73a8 2024-11-20T22:26:03,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8a79fa88e7a947e8ac9326bb361c9b23 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/8a79fa88e7a947e8ac9326bb361c9b23 2024-11-20T22:26:03,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5485f584570e404e89525d3243a397e1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5485f584570e404e89525d3243a397e1 2024-11-20T22:26:03,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/35a27dbf25b74fff8fdec5b919157735 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/35a27dbf25b74fff8fdec5b919157735 2024-11-20T22:26:03,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9465ea275b94404fa16672b9a86376f3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9465ea275b94404fa16672b9a86376f3 2024-11-20T22:26:03,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/03506d0415774565bb08a387a21aa030 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/03506d0415774565bb08a387a21aa030 2024-11-20T22:26:03,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/81f7aa0c149b433f8085fa44703567df to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/81f7aa0c149b433f8085fa44703567df 2024-11-20T22:26:03,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/70533e668d77489d9d36e0c01b003a88 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/70533e668d77489d9d36e0c01b003a88 2024-11-20T22:26:03,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0b8015a4a1a7490181930cc7814bdbd7 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0b8015a4a1a7490181930cc7814bdbd7 2024-11-20T22:26:03,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/fa7b3c5045664841bc3e5503ba5fc28e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/fa7b3c5045664841bc3e5503ba5fc28e 2024-11-20T22:26:03,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5d99220ec28e48aaac65c08f7668d233 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/5d99220ec28e48aaac65c08f7668d233 2024-11-20T22:26:03,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/b2eb7f4ceb9340ae989c3293aba2add0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/b2eb7f4ceb9340ae989c3293aba2add0 2024-11-20T22:26:03,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/c720defc07ac44c294eaa1ef7f6d2f6f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/c720defc07ac44c294eaa1ef7f6d2f6f 2024-11-20T22:26:03,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/777f8e6dcec04664a6d2dd72475ec52e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/777f8e6dcec04664a6d2dd72475ec52e 2024-11-20T22:26:03,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e48f675627514d278aaf144e31c77ee9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e48f675627514d278aaf144e31c77ee9 2024-11-20T22:26:03,309 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/7145c8902b944ab39777d7826cc84aa4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/7145c8902b944ab39777d7826cc84aa4 2024-11-20T22:26:03,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0ac1e8a69ae946a58a8e3155e2cd12bc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/0ac1e8a69ae946a58a8e3155e2cd12bc 2024-11-20T22:26:03,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e3b85aa3098948468f92d7abe6184429 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/e3b85aa3098948468f92d7abe6184429 2024-11-20T22:26:03,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/773d818d360149ae97a66f02dd572bb9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/773d818d360149ae97a66f02dd572bb9 2024-11-20T22:26:03,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/808067fa9bd64c679d38a55d5d26bbd7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/808067fa9bd64c679d38a55d5d26bbd7 2024-11-20T22:26:03,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/1e13c03229e2479591028c840b7b1be5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/1e13c03229e2479591028c840b7b1be5 2024-11-20T22:26:03,327 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/recovered.edits/458.seqid, newMaxSeqId=458, maxSeqId=1 2024-11-20T22:26:03,329 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710. 2024-11-20T22:26:03,329 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 62dd6d22774f5784522279eafe291710: 2024-11-20T22:26:03,335 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 62dd6d22774f5784522279eafe291710 2024-11-20T22:26:03,339 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=62dd6d22774f5784522279eafe291710, regionState=CLOSED 2024-11-20T22:26:03,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T22:26:03,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 62dd6d22774f5784522279eafe291710, server=6365a1e51efd,46811,1732141422048 in 1.6670 sec 2024-11-20T22:26:03,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-20T22:26:03,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=62dd6d22774f5784522279eafe291710, UNASSIGN in 1.6750 sec 2024-11-20T22:26:03,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T22:26:03,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6790 sec 2024-11-20T22:26:03,348 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141563348"}]},"ts":"1732141563348"} 2024-11-20T22:26:03,349 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:26:03,359 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:26:03,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7430 sec 2024-11-20T22:26:03,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T22:26:03,755 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-20T22:26:03,755 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:26:03,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:03,757 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:03,758 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=148, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:03,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T22:26:03,774 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710 2024-11-20T22:26:03,780 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/recovered.edits] 2024-11-20T22:26:03,786 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/039e2145f6724c4c9f883222d7fe7819 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/039e2145f6724c4c9f883222d7fe7819 2024-11-20T22:26:03,789 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0ad2da5dc1e746e5987aba5d24dd1c72 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/0ad2da5dc1e746e5987aba5d24dd1c72 2024-11-20T22:26:03,791 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/1b84380bf20040e1b2a34a280a28139d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/A/1b84380bf20040e1b2a34a280a28139d 2024-11-20T22:26:03,794 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4005192abf9d4e4ca138d1b07f2d8c1d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/4005192abf9d4e4ca138d1b07f2d8c1d 2024-11-20T22:26:03,797 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6e9bd47466a04127a29e8f4c7ea91f18 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/6e9bd47466a04127a29e8f4c7ea91f18 2024-11-20T22:26:03,803 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9ed2e8f1fa7b4ef782979aecc4ae1d27 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/B/9ed2e8f1fa7b4ef782979aecc4ae1d27 2024-11-20T22:26:03,841 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/3ff89da94f564e5f8bff007edf61db22 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/3ff89da94f564e5f8bff007edf61db22 2024-11-20T22:26:03,847 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/54ae9b9393de42ec95d13847cd0e92b4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/54ae9b9393de42ec95d13847cd0e92b4 2024-11-20T22:26:03,857 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9016227519c9421393f910504797db48 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/C/9016227519c9421393f910504797db48 2024-11-20T22:26:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T22:26:03,860 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/recovered.edits/458.seqid to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710/recovered.edits/458.seqid 2024-11-20T22:26:03,861 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/62dd6d22774f5784522279eafe291710 2024-11-20T22:26:03,861 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:26:03,863 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=148, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure 
table=TestAcidGuarantees 2024-11-20T22:26:03,865 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:26:03,878 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:26:03,879 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=148, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:03,880 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:26:03,880 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141563880"}]},"ts":"9223372036854775807"} 2024-11-20T22:26:03,882 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:26:03,882 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 62dd6d22774f5784522279eafe291710, NAME => 'TestAcidGuarantees,,1732141534843.62dd6d22774f5784522279eafe291710.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:26:03,882 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T22:26:03,883 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141563883"}]},"ts":"9223372036854775807"} 2024-11-20T22:26:03,886 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:26:03,935 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=148, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:03,936 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 180 msec 2024-11-20T22:26:04,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T22:26:04,060 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-20T22:26:04,085 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=238 (was 241), OpenFileDescriptor=449 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1102 (was 1046) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=868 (was 1746) 2024-11-20T22:26:04,114 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=1102, ProcessCount=11, AvailableMemoryMB=866 2024-11-20T22:26:04,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
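The disable/delete sequence logged above (DisableTableProcedure pid=144 followed by DeleteTableProcedure pid=148, which archives the region directory and removes the rows from hbase:meta) is the server-side counterpart of an ordinary Admin-API teardown. A minimal client-side sketch, assuming an hbase-site.xml on the classpath that points at this cluster; the class name DropTestTable is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.tableExists(table)) {
                    admin.disableTable(table); // corresponds to the DisableTableProcedure above
                    admin.deleteTable(table);  // corresponds to the DeleteTableProcedure above
                }
            }
        }
    }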
2024-11-20T22:26:04,120 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:26:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:04,128 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T22:26:04,128 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,129 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T22:26:04,130 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 149 2024-11-20T22:26:04,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:26:04,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742406_1582 (size=963) 2024-11-20T22:26:04,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:26:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:26:04,585 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72 2024-11-20T22:26:04,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742407_1583 (size=53) 2024-11-20T22:26:04,593 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:26:04,593 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1d38cbecc23f382ec5e2809846caa111, disabling compactions & flushes 2024-11-20T22:26:04,593 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:04,593 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:04,593 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. after waiting 0 ms 2024-11-20T22:26:04,593 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:04,593 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:04,593 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:04,594 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T22:26:04,594 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732141564594"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732141564594"}]},"ts":"1732141564594"} 2024-11-20T22:26:04,596 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
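The CREATE request logged at 22:26:04,120 carries three column families (A, B, C) with VERSIONS => '1', the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', and a 131072-byte MEMSTORE_FLUSHSIZE that trips the TableDescriptorChecker warning. A sketch of building an equivalent descriptor with the 2.x builder API; the builder calls are an approximation for illustration, not the test's own code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TestTableDescriptor {
      static TableDescriptor build() {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table-level METADATA key shown in the create log line
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                // 131072 bytes; deliberately small in this test, hence the checker warning
                .setMemStoreFlushSize(128 * 1024);
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)          // VERSIONS => '1'
              .build());
        }
        return builder.build();
      }
    }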
2024-11-20T22:26:04,598 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T22:26:04,598 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141564598"}]},"ts":"1732141564598"} 2024-11-20T22:26:04,599 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T22:26:04,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, ASSIGN}] 2024-11-20T22:26:04,653 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, ASSIGN 2024-11-20T22:26:04,653 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, ASSIGN; state=OFFLINE, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=false 2024-11-20T22:26:04,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:26:04,809 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:04,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; OpenRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:26:04,962 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:04,965 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
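The repeated "Checking to see if procedure is done pid=149" lines are the client polling the master until the CreateTableProcedure finishes; the same pattern shows up for pids 148, 152 and 157 elsewhere in this log. With the Admin API that polling is hidden behind the blocking createTable call, or surfaced as a Future by the async variant (the single-argument createTableAsync overload available in recent 2.x clients; an assumption for this log's 2.7.0-SNAPSHOT build). A hedged sketch, reusing a descriptor such as the one in the previous snippet:

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class CreateTestTable {
      // Blocking: returns once the master reports the create procedure as SUCCESS.
      static void createBlocking(Admin admin, TableDescriptor desc) throws Exception {
        admin.createTable(desc);
      }

      // Non-blocking: the Future completes when the procedure is done, which is what the
      // periodic MasterRpcServices(1305) "is procedure done" checks above correspond to.
      static void createAsync(Admin admin, TableDescriptor desc) throws Exception {
        Future<Void> pending = admin.createTableAsync(desc);
        pending.get();   // or keep working and poll pending.isDone()
      }
    }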
2024-11-20T22:26:04,965 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:26:04,966 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,966 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:26:04,966 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,966 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,971 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,972 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:26:04,972 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d38cbecc23f382ec5e2809846caa111 columnFamilyName A 2024-11-20T22:26:04,972 DEBUG [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,973 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(327): Store=1d38cbecc23f382ec5e2809846caa111/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:26:04,973 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,974 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:26:04,974 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d38cbecc23f382ec5e2809846caa111 columnFamilyName B 2024-11-20T22:26:04,974 DEBUG [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,974 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(327): Store=1d38cbecc23f382ec5e2809846caa111/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:26:04,974 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,975 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:26:04,975 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d38cbecc23f382ec5e2809846caa111 columnFamilyName C 2024-11-20T22:26:04,975 DEBUG [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:04,976 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(327): Store=1d38cbecc23f382ec5e2809846caa111/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:26:04,976 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:04,977 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,977 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,978 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:26:04,979 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:04,980 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T22:26:04,981 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 1d38cbecc23f382ec5e2809846caa111; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64346953, jitterRate=-0.041155681014060974}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:26:04,982 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:04,983 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., pid=151, masterSystemTime=1732141564962 2024-11-20T22:26:04,985 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=OPEN, openSeqNum=2, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:04,987 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:04,987 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
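Each store opened above logs "compactor=ADAPTIVE, pipelineThreshold=2", i.e. a CompactingMemStore driven by the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same policy can also be requested per column family through the descriptor builder; this is an alternative to the METADATA key used by this test, sketched only for contrast:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamily {
      // Per-family equivalent of the ADAPTIVE in-memory compaction seen in the store-open lines.
      static ColumnFamilyDescriptor of(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .setMaxVersions(1)
            .build();
      }
    }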
2024-11-20T22:26:04,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T22:26:04,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; OpenRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 in 176 msec 2024-11-20T22:26:04,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-20T22:26:04,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, ASSIGN in 336 msec 2024-11-20T22:26:04,989 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T22:26:04,989 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141564989"}]},"ts":"1732141564989"} 2024-11-20T22:26:04,990 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T22:26:05,003 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T22:26:05,009 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 883 msec 2024-11-20T22:26:05,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T22:26:05,237 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-11-20T22:26:05,239 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ae9fdd3 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41782d17 2024-11-20T22:26:05,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e212924, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:05,284 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:05,285 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:05,286 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T22:26:05,287 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T22:26:05,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] util.TableDescriptorChecker(321): 
MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T22:26:05,289 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T22:26:05,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:05,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742408_1584 (size=999) 2024-11-20T22:26:05,711 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T22:26:05,711 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T22:26:05,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=153, ppid=152, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T22:26:05,716 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, REOPEN/MOVE}] 2024-11-20T22:26:05,720 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, REOPEN/MOVE 2024-11-20T22:26:05,721 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:05,722 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T22:26:05,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:26:05,874 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:05,874 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:05,874 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T22:26:05,874 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing 1d38cbecc23f382ec5e2809846caa111, disabling compactions & flushes 2024-11-20T22:26:05,874 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:05,874 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:05,874 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. after waiting 0 ms 2024-11-20T22:26:05,874 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
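The MODIFY request at 22:26:05,289 switches family A to IS_MOB => 'true' with MOB_THRESHOLD => '4', and the master runs a ModifyTableProcedure plus a ReopenTableRegionsProcedure (pids 152-155 so far), which is why the region is being closed here. A sketch of driving that schema change through Admin.modifyTable, fetching the current descriptor rather than rebuilding it; the method names are the standard 2.x builder API, not copied from the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnA {
      static void apply(Admin admin) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(table);
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4)    // cells larger than 4 bytes become MOB references
                .build())
            .build();
        admin.modifyTable(updated);    // master reopens the table's regions to pick this up
      }
    }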
2024-11-20T22:26:05,885 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T22:26:05,886 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:05,886 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:05,886 WARN [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionServer(3786): Not adding moved region record: 1d38cbecc23f382ec5e2809846caa111 to self. 2024-11-20T22:26:05,888 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:05,888 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=CLOSED 2024-11-20T22:26:05,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-20T22:26:05,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 in 167 msec 2024-11-20T22:26:05,891 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, REOPEN/MOVE; state=CLOSED, location=6365a1e51efd,46811,1732141422048; forceNewPlan=false, retain=true 2024-11-20T22:26:06,041 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=OPENING, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=154, state=RUNNABLE; OpenRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048}] 2024-11-20T22:26:06,194 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,197 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
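Once the OpenRegionProcedure dispatched above (pid=156) finishes, family A should report MOB enabled with threshold 4. A small post-condition check a caller might run; the assertion style is illustrative and not taken from the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyMobSchema {
      static void check(Admin admin) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        ColumnFamilyDescriptor a =
            admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
        if (!a.isMobEnabled() || a.getMobThreshold() != 4) {
          throw new AssertionError("family A not MOB-enabled with threshold 4: " + a);
        }
        if (!admin.isTableAvailable(table)) {   // regions back online after the reopen
          throw new AssertionError("TestAcidGuarantees not yet available");
        }
      }
    }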
2024-11-20T22:26:06,197 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7285): Opening region: {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} 2024-11-20T22:26:06,197 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,198 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T22:26:06,198 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7327): checking encryption for 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,198 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7330): checking classloading for 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,200 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,201 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:26:06,201 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d38cbecc23f382ec5e2809846caa111 columnFamilyName A 2024-11-20T22:26:06,203 DEBUG [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,207 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(327): Store=1d38cbecc23f382ec5e2809846caa111/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:26:06,208 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,209 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:26:06,209 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d38cbecc23f382ec5e2809846caa111 columnFamilyName B 2024-11-20T22:26:06,209 DEBUG [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,210 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(327): Store=1d38cbecc23f382ec5e2809846caa111/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:26:06,210 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,212 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T22:26:06,212 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d38cbecc23f382ec5e2809846caa111 columnFamilyName C 2024-11-20T22:26:06,212 DEBUG [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,213 INFO [StoreOpener-1d38cbecc23f382ec5e2809846caa111-1 {}] regionserver.HStore(327): Store=1d38cbecc23f382ec5e2809846caa111/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T22:26:06,213 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:06,214 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,215 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,217 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T22:26:06,219 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1085): writing seq id for 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,221 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1102): Opened 1d38cbecc23f382ec5e2809846caa111; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60667114, jitterRate=-0.09598955512046814}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T22:26:06,222 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1001): Region open journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:06,224 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., pid=156, masterSystemTime=1732141566194 2024-11-20T22:26:06,225 DEBUG [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:06,225 INFO [RS_OPEN_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
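From here the test fans out client connections and starts writing rows such as test_row_1/A:col10. Because family A is now MOB-enabled with a 4-byte threshold, flushed values land under mobdir/ (the d41d8cd9... file below), and once the region's blocking memstore limit (512.0 K in this configuration) is reached, puts fail fast with RegionTooBusyException and are retried by the client. A hedged sketch of that client-side pattern; row and value contents are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      static void run(Connection conn, Admin admin) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        byte[] value = Bytes.toBytes("longer-than-4-bytes");   // exceeds MOB_THRESHOLD, so MOB-eligible
        try (Table table = conn.getTable(name)) {
          for (int i = 0; i < 100; i++) {
            Put put = new Put(Bytes.toBytes("test_row_" + i));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);   // RegionTooBusyException under memstore pressure is retried by the client
          }
        }
        admin.flush(name);    // client-side counterpart of the FlushTableProcedure (pid=157) below
      }
    }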
2024-11-20T22:26:06,226 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=OPEN, openSeqNum=5, regionLocation=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=154 2024-11-20T22:26:06,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=154, state=SUCCESS; OpenRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 in 185 msec 2024-11-20T22:26:06,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-20T22:26:06,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, REOPEN/MOVE in 512 msec 2024-11-20T22:26:06,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T22:26:06,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 516 msec 2024-11-20T22:26:06,233 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 943 msec 2024-11-20T22:26:06,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T22:26:06,236 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62fe681e to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@626ee1c3 2024-11-20T22:26:06,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45219ca3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,272 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73e77a91 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6612edd7 2024-11-20T22:26:06,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f1fb132, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,295 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x470e9e22 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3603af41 2024-11-20T22:26:06,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64470e92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,331 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x722e4d03 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59fc8da0 2024-11-20T22:26:06,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5932b93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,348 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55c3d878 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68efdeed 2024-11-20T22:26:06,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74bcafd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,398 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x520ba63a to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43d84184 2024-11-20T22:26:06,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ca2054e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,440 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ff94e66 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5eace07c 2024-11-20T22:26:06,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72881692, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,467 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x57aa792e to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e3cdca7 2024-11-20T22:26:06,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cb26b74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,542 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x362507f8 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3e743c7f 2024-11-20T22:26:06,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71d2090e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,585 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ee91d52 to 127.0.0.1:51916 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51615734 2024-11-20T22:26:06,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@309aae6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T22:26:06,640 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-20T22:26:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:06,642 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:06,643 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:06,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:06,651 DEBUG [hconnection-0x5dbe7c73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,652 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42964, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,659 DEBUG [hconnection-0x6321b39-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,659 DEBUG [hconnection-0x71c0df8e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,660 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,660 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,678 DEBUG [hconnection-0x2fb85d1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column 
families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:06,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:06,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:06,680 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:06,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:06,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:06,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:06,698 DEBUG [hconnection-0x725080be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,699 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,707 DEBUG [hconnection-0x4a8a5b8b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,708 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,723 DEBUG [hconnection-0x63dd5ff7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,724 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,731 DEBUG [hconnection-0x6494dc56-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,732 DEBUG [hconnection-0x1687a14a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,732 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,733 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T22:26:06,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:06,746 DEBUG [hconnection-0x223cd09c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T22:26:06,747 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T22:26:06,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c0c64dc4e1e4a27992b98a1bb692f59_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_1/A:col10/1732141566664/Put/seqid=0 2024-11-20T22:26:06,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141626750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141626752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141626752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141626757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141626751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742409_1585 (size=9714) 2024-11-20T22:26:06,776 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:06,781 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c0c64dc4e1e4a27992b98a1bb692f59_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c0c64dc4e1e4a27992b98a1bb692f59_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:06,782 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/2275c30e30cb45b083ff43318caff0cc, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:06,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/2275c30e30cb45b083ff43318caff0cc is 175, key is test_row_1/A:col10/1732141566664/Put/seqid=0 2024-11-20T22:26:06,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:06,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:06,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
as already flushing 2024-11-20T22:26:06,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:06,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742410_1586 (size=22361) 2024-11-20T22:26:06,807 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/2275c30e30cb45b083ff43318caff0cc 2024-11-20T22:26:06,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/fef794012ca84443bb837adb6eab9959 is 50, key is test_row_1/B:col10/1732141566664/Put/seqid=0 2024-11-20T22:26:06,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742411_1587 (size=9657) 2024-11-20T22:26:06,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/fef794012ca84443bb837adb6eab9959 2024-11-20T22:26:06,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141626858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141626858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141626858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141626863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:06,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141626864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/42d2ff01a4514ac89670e69880ac39e0 is 50, key is test_row_1/C:col10/1732141566664/Put/seqid=0 2024-11-20T22:26:06,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742412_1588 (size=9657) 2024-11-20T22:26:06,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/42d2ff01a4514ac89670e69880ac39e0 2024-11-20T22:26:06,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:06,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:06,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:06,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:06,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:06,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:06,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:06,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/2275c30e30cb45b083ff43318caff0cc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc 2024-11-20T22:26:06,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc, entries=100, sequenceid=15, filesize=21.8 K 2024-11-20T22:26:06,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/fef794012ca84443bb837adb6eab9959 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959 2024-11-20T22:26:06,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959, entries=100, sequenceid=15, filesize=9.4 K 2024-11-20T22:26:06,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/42d2ff01a4514ac89670e69880ac39e0 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/42d2ff01a4514ac89670e69880ac39e0 2024-11-20T22:26:06,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/42d2ff01a4514ac89670e69880ac39e0, entries=100, sequenceid=15, filesize=9.4 K 2024-11-20T22:26:06,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 1d38cbecc23f382ec5e2809846caa111 in 304ms, sequenceid=15, compaction requested=false 2024-11-20T22:26:06,983 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T22:26:06,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:07,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:26:07,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:07,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:07,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:07,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:07,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:07,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:07,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141627069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141627068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141627070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141627070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141627071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120af0fbeb0be074e2aab27075ed1ed61db_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141566747/Put/seqid=0 2024-11-20T22:26:07,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:07,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:07,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:07,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742413_1589 (size=12154) 2024-11-20T22:26:07,125 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:07,134 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120af0fbeb0be074e2aab27075ed1ed61db_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af0fbeb0be074e2aab27075ed1ed61db_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:07,135 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0718c620ccdd4280aac0b7a7f47bbd8e, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:07,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0718c620ccdd4280aac0b7a7f47bbd8e is 175, key is test_row_0/A:col10/1732141566747/Put/seqid=0 2024-11-20T22:26:07,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742414_1590 (size=30955) 2024-11-20T22:26:07,174 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=44, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0718c620ccdd4280aac0b7a7f47bbd8e 2024-11-20T22:26:07,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141627176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141627175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141627176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141627179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/1d1f1733451f4f0ab7f72f2a5c42efab is 50, key is test_row_0/B:col10/1732141566747/Put/seqid=0 2024-11-20T22:26:07,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742415_1591 (size=12001) 2024-11-20T22:26:07,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:07,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:07,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141627375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141627382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141627382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141627383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141627389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,410 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:07,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:07,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,563 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:07,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:07,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/1d1f1733451f4f0ab7f72f2a5c42efab 2024-11-20T22:26:07,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/9a3b59419e3949fb87b8fa4b84a23bc9 is 50, key is test_row_0/C:col10/1732141566747/Put/seqid=0 2024-11-20T22:26:07,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141627692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141627692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141627692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141627692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742416_1592 (size=12001) 2024-11-20T22:26:07,716 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:07,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:07,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:07,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:07,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:07,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:07,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141627882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,025 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:08,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:08,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/9a3b59419e3949fb87b8fa4b84a23bc9 2024-11-20T22:26:08,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0718c620ccdd4280aac0b7a7f47bbd8e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e 2024-11-20T22:26:08,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e, entries=150, sequenceid=44, filesize=30.2 K 2024-11-20T22:26:08,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/1d1f1733451f4f0ab7f72f2a5c42efab as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/1d1f1733451f4f0ab7f72f2a5c42efab 2024-11-20T22:26:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,125 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/1d1f1733451f4f0ab7f72f2a5c42efab, entries=150, sequenceid=44, filesize=11.7 K 2024-11-20T22:26:08,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/9a3b59419e3949fb87b8fa4b84a23bc9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9a3b59419e3949fb87b8fa4b84a23bc9 2024-11-20T22:26:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
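Annotation (not part of the log): the repeated pid=158 failures above and below follow one pattern. The master dispatches a FlushRegionCallable, the region server finds the region already mid-flush ("NOT flushing ... as already flushing"), the callable fails with "Unable to complete flush", and the master logs "Remote procedure failed, pid=158" and re-dispatches until the in-flight flush completes. The sketch below only illustrates that dispatch-and-retry shape; every class and method name in it is invented for illustration and is not the HBase procedure-v2 implementation.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Invented names, illustration only: models the coordinator/worker exchange seen
// above for pid=158. The real pieces are FlushRegionCallable, RSProcedureHandler
// and the master-side procedure framework.
public class FlushRetrySketch {
  static final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

  // Worker side: refuse to start a second flush while one is still running.
  static void flushRegionOnce() throws IOException {
    if (alreadyFlushing.get()) {
      throw new IOException("Unable to complete flush: region is already flushing");
    }
    // ... write memstore snapshots for stores A/B/C to .tmp and commit them ...
  }

  public static void main(String[] args) throws Exception {
    // Coordinator side: keep re-dispatching until the worker succeeds, which
    // mirrors the repeated "Remote procedure failed, pid=158" lines above.
    for (int dispatch = 1; ; dispatch++) {
      try {
        flushRegionOnce();
        System.out.println("flush completed after " + dispatch + " dispatches");
        break;
      } catch (IOException retryLater) {
        Thread.sleep(150);                              // back off before the next dispatch
        if (dispatch == 3) alreadyFlushing.set(false);  // the in-flight flush finishes
      }
    }
  }
}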
2024-11-20T22:26:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
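Annotation (not part of the log): the long run of "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" lines around here records StoreFileTrackerFactory resolving a tracker implementation for each store access. A minimal sketch of how such a choice is typically driven by configuration follows; the property name hbase.store.file-tracker.impl, the FILE alternative, and its class name are assumptions about the store-file-tracking feature and may differ between HBase versions.

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch only: resolve a tracker class name from configuration,
// mirroring the repeated DEBUG lines above. Key and values are assumptions.
public class TrackerSelectionSketch {
  static final String TRACKER_IMPL_KEY = "hbase.store.file-tracker.impl"; // assumed key

  static String resolveTrackerClass(Configuration conf) {
    // "DEFAULT" keeps store files tracked implicitly through the file system layout;
    // "FILE" would switch to an explicit tracking file per store.
    String impl = conf.get(TRACKER_IMPL_KEY, "DEFAULT");
    switch (impl) {
      case "DEFAULT":
        return "org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker";
      case "FILE":
        return "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker";
      default:
        return impl; // assume a fully qualified class name was supplied
    }
  }

  public static void main(String[] args) {
    System.out.println(resolveTrackerClass(new Configuration())); // default tracker class
  }
}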
2024-11-20T22:26:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
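Annotation (not part of the log): the "RegionTooBusyException: Over memstore limit=512.0 K" warnings above and further down come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking size, i.e. the flush size multiplied by the block multiplier. The sketch below shows that arithmetic with the standard keys hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 128 KB flush size is only an assumed value that would reproduce the 512 K limit seen here, not a value read from the log.

import org.apache.hadoop.conf.Configuration;

// Illustrative arithmetic only: how a 512 K blocking limit can arise.
// The concrete values are assumptions chosen to match "Over memstore limit=512.0 K".
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed: 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockingLimit = flushSize * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // 128 KB * 4 = 512 KB: past this size, writes are rejected with
    // RegionTooBusyException until a flush brings the memstore back down.
    System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
  }
}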
2024-11-20T22:26:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
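Annotation (not part of the log): each "Over memstore limit" rejection is also surfaced to the writer on the other end of the Mutate call (the "callId ... exception=...RegionTooBusyException" lines). A minimal client-side sketch of retrying such a write follows, using the public HBase client API and the table and column-family layout this test uses (TestAcidGuarantees, families A/B/C); the retry budget and sleep are arbitrary illustration values, and in practice the client may deliver the failure wrapped in its own retries-exhausted exception rather than directly as shown.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: a blunt retry loop around a single put, roughly what the
// test's writer threads end up doing while the region is over its memstore limit.
public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {  // arbitrary retry budget
        try {
          table.put(put);
          break;                                        // write accepted
        } catch (RegionTooBusyException busy) {
          // Region still above its blocking memstore size; back off and retry.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}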
2024-11-20T22:26:08,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9a3b59419e3949fb87b8fa4b84a23bc9, entries=150, sequenceid=44, filesize=11.7 K 2024-11-20T22:26:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 1d38cbecc23f382ec5e2809846caa111 in 1104ms, sequenceid=44, compaction requested=false 2024-11-20T22:26:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,175 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,185 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:08,188 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,213 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120722043a241414522a74dc51807eba0ea_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141567069/Put/seqid=0 2024-11-20T22:26:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742417_1593 (size=9714) 2024-11-20T22:26:08,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,249 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120722043a241414522a74dc51807eba0ea_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120722043a241414522a74dc51807eba0ea_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:08,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/7e9e6828ff7e40d199be25e54f71ee8b, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:08,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/7e9e6828ff7e40d199be25e54f71ee8b is 175, key is test_row_0/A:col10/1732141567069/Put/seqid=0 2024-11-20T22:26:08,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141628254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141628256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141628263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141628263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742418_1594 (size=22361) 2024-11-20T22:26:08,279 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/7e9e6828ff7e40d199be25e54f71ee8b 2024-11-20T22:26:08,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/db2b124e55df4db7805de73a41bf021e is 50, key is test_row_0/B:col10/1732141567069/Put/seqid=0 2024-11-20T22:26:08,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742419_1595 (size=9657) 2024-11-20T22:26:08,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141628360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141628367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141628367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141628367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141628564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141628573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141628573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141628578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,737 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/db2b124e55df4db7805de73a41bf021e 2024-11-20T22:26:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d95aefd3b0e347f19b2d1852b84a2635 is 50, key is test_row_0/C:col10/1732141567069/Put/seqid=0 2024-11-20T22:26:08,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742420_1596 (size=9657) 2024-11-20T22:26:08,809 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d95aefd3b0e347f19b2d1852b84a2635 2024-11-20T22:26:08,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/7e9e6828ff7e40d199be25e54f71ee8b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b 2024-11-20T22:26:08,817 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b, entries=100, sequenceid=51, filesize=21.8 K 2024-11-20T22:26:08,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/db2b124e55df4db7805de73a41bf021e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/db2b124e55df4db7805de73a41bf021e 2024-11-20T22:26:08,823 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/db2b124e55df4db7805de73a41bf021e, entries=100, sequenceid=51, filesize=9.4 K 2024-11-20T22:26:08,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d95aefd3b0e347f19b2d1852b84a2635 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d95aefd3b0e347f19b2d1852b84a2635 2024-11-20T22:26:08,828 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d95aefd3b0e347f19b2d1852b84a2635, entries=100, sequenceid=51, filesize=9.4 K 2024-11-20T22:26:08,829 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 1d38cbecc23f382ec5e2809846caa111 in 640ms, sequenceid=51, compaction requested=true 2024-11-20T22:26:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-20T22:26:08,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-20T22:26:08,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-20T22:26:08,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1870 sec 2024-11-20T22:26:08,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.1920 sec 2024-11-20T22:26:08,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T22:26:08,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:08,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:08,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:08,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:08,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141628879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141628884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141628886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141628887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141628891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,902 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T22:26:08,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203cbc40dc516b44dda5f9a0b20e4087be_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:08,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742421_1597 (size=12154) 2024-11-20T22:26:08,964 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:08,967 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203cbc40dc516b44dda5f9a0b20e4087be_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203cbc40dc516b44dda5f9a0b20e4087be_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:08,968 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47b820fcf1404a51adeef205ee02ff1d, store: [table=TestAcidGuarantees 
family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:08,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47b820fcf1404a51adeef205ee02ff1d is 175, key is test_row_0/A:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:08,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141628985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:08,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:08,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141628991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742422_1598 (size=30955) 2024-11-20T22:26:09,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141628993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,003 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47b820fcf1404a51adeef205ee02ff1d 2024-11-20T22:26:09,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/26a996f5a8694d539de69e2c13a01149 is 50, key is test_row_0/B:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:09,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742423_1599 (size=12001) 2024-11-20T22:26:09,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/26a996f5a8694d539de69e2c13a01149 2024-11-20T22:26:09,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/10f91229d4134723aff0a47663e6d4c6 is 50, key is test_row_0/C:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:09,163 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T22:26:09,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742424_1600 (size=12001) 2024-11-20T22:26:09,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/10f91229d4134723aff0a47663e6d4c6 2024-11-20T22:26:09,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141629189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141629194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47b820fcf1404a51adeef205ee02ff1d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d 2024-11-20T22:26:09,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d, entries=150, sequenceid=81, filesize=30.2 K 2024-11-20T22:26:09,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/26a996f5a8694d539de69e2c13a01149 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/26a996f5a8694d539de69e2c13a01149 2024-11-20T22:26:09,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141629202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/26a996f5a8694d539de69e2c13a01149, entries=150, sequenceid=81, filesize=11.7 K 2024-11-20T22:26:09,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/10f91229d4134723aff0a47663e6d4c6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/10f91229d4134723aff0a47663e6d4c6 2024-11-20T22:26:09,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/10f91229d4134723aff0a47663e6d4c6, entries=150, sequenceid=81, filesize=11.7 K 2024-11-20T22:26:09,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for 1d38cbecc23f382ec5e2809846caa111 in 355ms, sequenceid=81, compaction requested=true 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:09,224 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:09,224 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:09,226 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 106632 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:09,226 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:09,226 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:09,226 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=104.1 K 2024-11-20T22:26:09,226 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
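Aside on the selection reported above: the ExploringCompactionPolicy entry says it picked 4 store files totalling 106632 bytes after checking that the candidates were "in ratio". The sketch below is a simplified illustration of that size-ratio test, not HBase's actual ExploringCompactionPolicy code; the 1.2 value is assumed as the usual default for hbase.hstore.compaction.ratio, and the individual file sizes are made up so that they sum to the reported 106632 bytes.

import java.util.List;

// Simplified sketch of the "in ratio" check used when selecting store files for a
// minor compaction: every file in the candidate window must be no larger than
// ratio * (combined size of the other files in the window).
public final class RatioCheckSketch {
    public static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = 0L;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            // A single file much larger than the rest fails the test.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical sizes (bytes) chosen to sum to the 106632 reported above.
        List<Long> sizes = List.of(22_000L, 31_000L, 22_000L, 31_632L);
        System.out.println(filesInRatio(sizes, 1.2)); // true: the window is eligible
    }
}

The intent of the check is that no single file should dwarf the rest of the window, since such a compaction would mostly rewrite one large file for little gain.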
2024-11-20T22:26:09,226 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d] 2024-11-20T22:26:09,227 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:09,227 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:09,227 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:09,227 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/1d1f1733451f4f0ab7f72f2a5c42efab, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/db2b124e55df4db7805de73a41bf021e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/26a996f5a8694d539de69e2c13a01149] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=42.3 K 2024-11-20T22:26:09,227 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting fef794012ca84443bb837adb6eab9959, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141566664 2024-11-20T22:26:09,227 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2275c30e30cb45b083ff43318caff0cc, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141566664 2024-11-20T22:26:09,227 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d1f1733451f4f0ab7f72f2a5c42efab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732141566747 2024-11-20T22:26:09,227 DEBUG 
[RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0718c620ccdd4280aac0b7a7f47bbd8e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732141566747 2024-11-20T22:26:09,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,229 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e9e6828ff7e40d199be25e54f71ee8b, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141567068 2024-11-20T22:26:09,229 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting db2b124e55df4db7805de73a41bf021e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141567068 2024-11-20T22:26:09,229 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47b820fcf1404a51adeef205ee02ff1d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732141568231 2024-11-20T22:26:09,230 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 26a996f5a8694d539de69e2c13a01149, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732141568231 2024-11-20T22:26:09,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,246 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true 
store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:09,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,254 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#508 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:09,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,254 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e3925339933e4d769a23ca2389ac4c3f is 50, key is test_row_0/B:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:09,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,262 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112078b6cd7371274373818997a14e33815f_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,265 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112078b6cd7371274373818997a14e33815f_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:09,265 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112078b6cd7371274373818997a14e33815f_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:09,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742425_1601 (size=12139) 2024-11-20T22:26:09,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
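A few entries above, the PressureAwareThroughputController reports an average compaction throughput of 3.28 MB/second against a total limit of 50.00 MB/second, with zero sleeps. The sketch below shows the general shape of that kind of throttling (report bytes written, sleep when the running rate would exceed the limit); it is a hand-rolled approximation, not HBase's controller, and only the 50 MB/second figure is taken from the log.

// Simplified sketch of write-rate throttling for compactions: the writer reports the
// bytes it has produced, and control() sleeps just long enough to keep the average
// rate under the configured limit.
public final class ThroughputThrottleSketch {
    private final double maxBytesPerSecond;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Returns how long this call slept, in milliseconds.
    long control(long deltaBytes) throws InterruptedException {
        bytesInWindow += deltaBytes;
        double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
        double minimumSeconds = bytesInWindow / maxBytesPerSecond;
        long sleepMs = (long) ((minimumSeconds - elapsedSeconds) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);
            return sleepMs;
        }
        return 0;
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024); // 50 MB/second
        long slept = 0;
        for (int i = 0; i < 100; i++) {
            slept += throttle.control(1024 * 1024); // pretend 1 MB of compacted output was just written
        }
        System.out.println("total slept time is " + slept + " ms");
    }
}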
2024-11-20T22:26:09,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,301 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e3925339933e4d769a23ca2389ac4c3f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e3925339933e4d769a23ca2389ac4c3f 2024-11-20T22:26:09,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,310 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into e3925339933e4d769a23ca2389ac4c3f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:09,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:09,310 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=12, startTime=1732141569224; duration=0sec 2024-11-20T22:26:09,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:09,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:09,310 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:09,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:09,312 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:09,313 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:09,313 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/42d2ff01a4514ac89670e69880ac39e0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9a3b59419e3949fb87b8fa4b84a23bc9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d95aefd3b0e347f19b2d1852b84a2635, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/10f91229d4134723aff0a47663e6d4c6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=42.3 K 2024-11-20T22:26:09,313 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 42d2ff01a4514ac89670e69880ac39e0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732141566664 2024-11-20T22:26:09,313 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a3b59419e3949fb87b8fa4b84a23bc9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732141566747 2024-11-20T22:26:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,314 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d95aefd3b0e347f19b2d1852b84a2635, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732141567068 2024-11-20T22:26:09,315 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 10f91229d4134723aff0a47663e6d4c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732141568231 2024-11-20T22:26:09,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742426_1602 (size=4469) 2024-11-20T22:26:09,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:09,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,318 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#507 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:09,319 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ab11ff95fc4b48dbb8d33555f2ed98b1 is 175, key is test_row_0/A:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,324 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#509 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:09,324 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/2dd1eb85746e4da4a3ad389d812fc4f4 is 50, key is test_row_0/C:col10/1732141568257/Put/seqid=0 2024-11-20T22:26:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742427_1603 (size=31093) 2024-11-20T22:26:09,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,331 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ab11ff95fc4b48dbb8d33555f2ed98b1 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ab11ff95fc4b48dbb8d33555f2ed98b1 2024-11-20T22:26:09,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,337 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into ab11ff95fc4b48dbb8d33555f2ed98b1(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:09,337 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:09,337 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=12, startTime=1732141569224; duration=0sec 2024-11-20T22:26:09,337 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:09,337 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:09,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742428_1604 (size=12139) 2024-11-20T22:26:09,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,376 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/2dd1eb85746e4da4a3ad389d812fc4f4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/2dd1eb85746e4da4a3ad389d812fc4f4 2024-11-20T22:26:09,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,390 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 2dd1eb85746e4da4a3ad389d812fc4f4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:09,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,390 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:09,390 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=12, startTime=1732141569224; duration=0sec 2024-11-20T22:26:09,390 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:09,390 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:09,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 
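The flush requested above and the repeated "Over memstore limit=512.0 K" rejections in this run are two sides of the same back-pressure mechanism: HRegion.checkResources refuses new writes once the region's memstore passes its blocking limit, and the MemStoreFlusher works the size back down. The sketch below is a minimal illustration of that guard, assuming a blocking limit computed as flush size times a block multiplier in the style of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 128 KB and 4x values are illustrative, not the test's actual configuration, even though they happen to multiply out to the 512 K limit seen here.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of a memstore back-pressure guard: once the tracked memstore
// size crosses flushSize * blockMultiplier, further writes are rejected until a
// flush brings the size back down. This mirrors the behaviour reported by the
// "Over memstore limit" warnings above; it is not HBase's HRegion.checkResources.
public final class MemStorePressureSketch {
    static final class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final AtomicLong memStoreSize = new AtomicLong();
    private final long blockingLimit;

    MemStorePressureSketch(long flushSizeBytes, int blockMultiplier) {
        this.blockingLimit = flushSizeBytes * blockMultiplier;
    }

    void checkResources(String regionName) {
        if (memStoreSize.get() > blockingLimit) {
            // The real server also requests a flush here; callers are expected to retry.
            throw new RegionTooBusy("Over memstore limit=" + blockingLimit + " bytes, regionName=" + regionName);
        }
    }

    void applyWrite(long cellBytes) { memStoreSize.addAndGet(cellBytes); }

    void flushed(long flushedBytes) { memStoreSize.addAndGet(-flushedBytes); }

    public static void main(String[] args) {
        // Illustrative values: 128 KB flush size with a 4x multiplier gives a 512 KB limit.
        MemStorePressureSketch region = new MemStorePressureSketch(128 * 1024, 4);
        region.applyWrite(600 * 1024);               // simulate a burst of puts
        try {
            region.checkResources("1d38cbecc23f382ec5e2809846caa111");
        } catch (RegionTooBusy e) {
            System.out.println("rejected: " + e.getMessage());
        }
        region.flushed(600 * 1024);                   // flush catches up
        region.checkResources("1d38cbecc23f382ec5e2809846caa111"); // passes now
    }
}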
2024-11-20T22:26:09,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:09,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:09,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:09,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:09,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:09,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:09,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:09,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cb8f2d4bc0d54434aee859dfb0777d0c_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141568883/Put/seqid=0 2024-11-20T22:26:09,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742429_1605 (size=14594) 2024-11-20T22:26:09,514 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:09,519 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cb8f2d4bc0d54434aee859dfb0777d0c_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cb8f2d4bc0d54434aee859dfb0777d0c_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:09,520 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ee884b77acb045df9e37359729db9d40, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:09,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ee884b77acb045df9e37359729db9d40 is 175, key is test_row_0/A:col10/1732141568883/Put/seqid=0 2024-11-20T22:26:09,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742430_1606 (size=39549) 2024-11-20T22:26:09,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141629551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141629551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141629551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,561 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ee884b77acb045df9e37359729db9d40 2024-11-20T22:26:09,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141629558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/974dc958715f49fc9b6a8817eb2fb31b is 50, key is test_row_0/B:col10/1732141568883/Put/seqid=0 2024-11-20T22:26:09,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742431_1607 (size=12001) 2024-11-20T22:26:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141629662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141629662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141629662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141629666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141629873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141629874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141629879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:09,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:09,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141629879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/974dc958715f49fc9b6a8817eb2fb31b 2024-11-20T22:26:10,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/121dc1e127bf463993b64d1a42a69be6 is 50, key is test_row_0/C:col10/1732141568883/Put/seqid=0 2024-11-20T22:26:10,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742432_1608 (size=12001) 2024-11-20T22:26:10,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/121dc1e127bf463993b64d1a42a69be6 2024-11-20T22:26:10,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ee884b77acb045df9e37359729db9d40 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40 2024-11-20T22:26:10,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40, entries=200, sequenceid=95, filesize=38.6 K 2024-11-20T22:26:10,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/974dc958715f49fc9b6a8817eb2fb31b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/974dc958715f49fc9b6a8817eb2fb31b 2024-11-20T22:26:10,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/974dc958715f49fc9b6a8817eb2fb31b, entries=150, sequenceid=95, filesize=11.7 K 2024-11-20T22:26:10,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/121dc1e127bf463993b64d1a42a69be6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/121dc1e127bf463993b64d1a42a69be6 2024-11-20T22:26:10,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/121dc1e127bf463993b64d1a42a69be6, entries=150, sequenceid=95, filesize=11.7 K 2024-11-20T22:26:10,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 1d38cbecc23f382ec5e2809846caa111 in 668ms, sequenceid=95, compaction requested=false 2024-11-20T22:26:10,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:10,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:10,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:26:10,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:10,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:10,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:10,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,194 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141630189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141630190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141630194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141630194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d9e4888f0e7945ba988fda2a5201711b_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742433_1609 (size=12154) 2024-11-20T22:26:10,247 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,250 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d9e4888f0e7945ba988fda2a5201711b_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d9e4888f0e7945ba988fda2a5201711b_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:10,251 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8ac3e4f3c1844bbe8575ab5b40a502a8, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8ac3e4f3c1844bbe8575ab5b40a502a8 is 175, key is test_row_0/A:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39471 is added to blk_1073742434_1610 (size=30955) 2024-11-20T22:26:10,283 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8ac3e4f3c1844bbe8575ab5b40a502a8 2024-11-20T22:26:10,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141630296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141630299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141630300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141630307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/23b76ea63d114240b8986ac04676619a is 50, key is test_row_0/B:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742435_1611 (size=12001) 2024-11-20T22:26:10,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/23b76ea63d114240b8986ac04676619a 2024-11-20T22:26:10,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/af365d71224f4d728e75cda773ed2889 is 50, key is test_row_0/C:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742436_1612 (size=12001) 2024-11-20T22:26:10,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/af365d71224f4d728e75cda773ed2889 2024-11-20T22:26:10,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8ac3e4f3c1844bbe8575ab5b40a502a8 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8 2024-11-20T22:26:10,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8, entries=150, sequenceid=123, filesize=30.2 K 2024-11-20T22:26:10,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/23b76ea63d114240b8986ac04676619a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/23b76ea63d114240b8986ac04676619a 2024-11-20T22:26:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/23b76ea63d114240b8986ac04676619a, entries=150, sequenceid=123, filesize=11.7 K 2024-11-20T22:26:10,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/af365d71224f4d728e75cda773ed2889 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/af365d71224f4d728e75cda773ed2889 2024-11-20T22:26:10,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/af365d71224f4d728e75cda773ed2889, entries=150, sequenceid=123, filesize=11.7 K 2024-11-20T22:26:10,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 1d38cbecc23f382ec5e2809846caa111 in 266ms, sequenceid=123, compaction requested=true 2024-11-20T22:26:10,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:10,447 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:10,448 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:10,448 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:10,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,448 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:10,448 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:10,448 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ab11ff95fc4b48dbb8d33555f2ed98b1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=99.2 K 2024-11-20T22:26:10,448 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
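
Editor's note: the entries above show the region server finishing a memstore flush and then queueing system compactions for stores A, B and C on its own (MemStoreFlusher and CompactSplit). As an aside, the same two operations can be requested explicitly through the public HBase 2.x Admin API; the snippet below is only a minimal client-side sketch (the table name is taken from the log, everything else is illustrative) and is not part of this test.

// Minimal sketch: explicitly flushing and requesting a compaction of the test table.
// The log above shows the server doing both automatically; this is the client-side equivalent.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);    // force memstores to HFiles, as MemStoreFlusher.0 does above
      admin.compact(table);  // request a (minor) compaction, analogous to CompactSplit's request
    }
  }
}
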
2024-11-20T22:26:10,448 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ab11ff95fc4b48dbb8d33555f2ed98b1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8] 2024-11-20T22:26:10,456 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab11ff95fc4b48dbb8d33555f2ed98b1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732141568231 2024-11-20T22:26:10,457 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee884b77acb045df9e37359729db9d40, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732141568883 2024-11-20T22:26:10,458 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:10,458 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ac3e4f3c1844bbe8575ab5b40a502a8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732141569549 2024-11-20T22:26:10,458 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:10,458 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
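
Editor's note: the ExploringCompactionPolicy entry above reports that the three A-family files were accepted "with 1 in ratio". The sketch below is a simplified, stand-alone illustration of that ratio test (a candidate set is rejected if any file is larger than hbase.hstore.compaction.ratio, default 1.2, times the combined size of the other candidates); it is not the actual policy code, and the byte counts are only approximations of the 30.4 K / 38.6 K / 30.2 K files summing to the logged 101597 bytes.

// Simplified stand-alone sketch of the "in ratio" check logged by the exploring policy.
import java.util.List;

public class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;                       // one file dominates the candidate set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-family files selected above (sum = 101597 bytes).
    List<Long> sizes = List.of(31_130L, 39_526L, 30_941L);
    System.out.println(filesInRatio(sizes, 1.2));   // true: no single file dominates
  }
}
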
2024-11-20T22:26:10,458 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e3925339933e4d769a23ca2389ac4c3f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/974dc958715f49fc9b6a8817eb2fb31b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/23b76ea63d114240b8986ac04676619a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=35.3 K 2024-11-20T22:26:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,467 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e3925339933e4d769a23ca2389ac4c3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732141568231 2024-11-20T22:26:10,468 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 974dc958715f49fc9b6a8817eb2fb31b, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732141568883 2024-11-20T22:26:10,468 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 23b76ea63d114240b8986ac04676619a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732141569549 2024-11-20T22:26:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,483 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,494 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#517 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:10,495 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/d619563bbb2c49c6950bb8aa7dd335af is 50, key is test_row_0/B:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,501 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204c609419053d4301a6e47ee13b296ef3_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,503 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204c609419053d4301a6e47ee13b296ef3_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,503 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204c609419053d4301a6e47ee13b296ef3_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:10,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:10,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:10,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:10,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:10,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742437_1613 (size=12241) 2024-11-20T22:26:10,528 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/d619563bbb2c49c6950bb8aa7dd335af as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d619563bbb2c49c6950bb8aa7dd335af 2024-11-20T22:26:10,540 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into d619563bbb2c49c6950bb8aa7dd335af(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:10,540 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:10,541 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141570448; duration=0sec 2024-11-20T22:26:10,541 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:10,541 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:10,541 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:10,542 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:10,542 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:10,542 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
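
Editor's note: the throttle.PressureAwareThroughputController entries in this part of the log report compaction write rates against a "total limit is 50.00 MB/second". The class below is only a minimal sketch of the underlying idea, tracking bytes written and sleeping when the observed rate exceeds the limit; HBase's real controller is considerably more elaborate (it also adjusts the limit under store pressure), so treat this purely as an illustration.

// Minimal sketch of a write-rate limiter, loosely inspired by the 50 MB/s compaction limit above.
public class SimpleThroughputLimiter {
  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;

  public SimpleThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Call after writing a chunk; sleeps just long enough to stay under the limit. */
  public void control(long chunkBytes) throws InterruptedException {
    bytesWritten += chunkBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minElapsedSec = bytesWritten / maxBytesPerSecond;   // time the write *should* have taken
    if (minElapsedSec > elapsedSec) {
      Thread.sleep((long) ((minElapsedSec - elapsedSec) * 1000));
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // 50 MB/s
    limiter.control(4 * 1024 * 1024);   // pretend a 4 MB block was just written
  }
}
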
2024-11-20T22:26:10,542 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/2dd1eb85746e4da4a3ad389d812fc4f4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/121dc1e127bf463993b64d1a42a69be6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/af365d71224f4d728e75cda773ed2889] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=35.3 K 2024-11-20T22:26:10,542 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dd1eb85746e4da4a3ad389d812fc4f4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732141568231 2024-11-20T22:26:10,543 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 121dc1e127bf463993b64d1a42a69be6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732141568883 2024-11-20T22:26:10,543 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting af365d71224f4d728e75cda773ed2889, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732141569549 2024-11-20T22:26:10,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742438_1614 (size=4469) 2024-11-20T22:26:10,551 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#516 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:10,552 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/66646e129ec9488397903b651e13d29f is 175, key is test_row_0/A:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120557b55f47a254e4db86da6e24c77d205_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141570181/Put/seqid=0 2024-11-20T22:26:10,568 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:10,569 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/4d3ec0b3670b428a91c93855d447548f is 50, key is test_row_0/C:col10/1732141569550/Put/seqid=0 2024-11-20T22:26:10,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141630571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141630573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141630571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141630574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742439_1615 (size=31195) 2024-11-20T22:26:10,630 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/66646e129ec9488397903b651e13d29f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/66646e129ec9488397903b651e13d29f 2024-11-20T22:26:10,636 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into 66646e129ec9488397903b651e13d29f(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
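
Editor's note: the RegionTooBusyException warnings around this point show writers being rejected with "Over memstore limit=512.0 K" while flushes and compactions drain the region. The HBase client normally retries these errors internally; the sketch below only illustrates an explicit backoff-and-retry loop a caller might use (for example with client retries tuned down). Row, family and qualifier are taken from the log keys; the value is a placeholder.

// Illustrative only: retrying a put that is rejected while the memstore is over its blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                              // write accepted
        } catch (RegionTooBusyException e) {  // memstore above its blocking limit
          if (attempt >= 5) throw e;          // give up after a few tries
          Thread.sleep(backoffMs);
          backoffMs *= 2;                     // back off while the flush drains the memstore
        }
      }
    }
  }
}
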
2024-11-20T22:26:10,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:10,636 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141570447; duration=0sec 2024-11-20T22:26:10,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:10,636 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:10,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742441_1617 (size=12241) 2024-11-20T22:26:10,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742440_1616 (size=14744) 2024-11-20T22:26:10,646 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,650 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120557b55f47a254e4db86da6e24c77d205_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120557b55f47a254e4db86da6e24c77d205_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:10,651 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e2a60e9704434218adb124cb277401a3, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e2a60e9704434218adb124cb277401a3 is 175, key is test_row_0/A:col10/1732141570181/Put/seqid=0 2024-11-20T22:26:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141630677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141630678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141630678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141630678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742442_1618 (size=39699) 2024-11-20T22:26:10,684 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e2a60e9704434218adb124cb277401a3 2024-11-20T22:26:10,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e4ecebae8e4246178d5103e10d713074 is 50, key is test_row_0/B:col10/1732141570181/Put/seqid=0 2024-11-20T22:26:10,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742443_1619 (size=12101) 2024-11-20T22:26:10,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e4ecebae8e4246178d5103e10d713074 2024-11-20T22:26:10,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T22:26:10,750 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-20T22:26:10,751 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:10,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-20T22:26:10,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:26:10,752 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:10,753 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:10,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:10,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/9f9507440b294e7a8d5f18bb712e8a29 is 50, key is test_row_0/C:col10/1732141570181/Put/seqid=0 2024-11-20T22:26:10,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742444_1620 (size=12101) 2024-11-20T22:26:10,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/9f9507440b294e7a8d5f18bb712e8a29 2024-11-20T22:26:10,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e2a60e9704434218adb124cb277401a3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3 2024-11-20T22:26:10,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3, entries=200, sequenceid=135, filesize=38.8 K 2024-11-20T22:26:10,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e4ecebae8e4246178d5103e10d713074 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e4ecebae8e4246178d5103e10d713074 2024-11-20T22:26:10,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e4ecebae8e4246178d5103e10d713074, entries=150, sequenceid=135, filesize=11.8 K 2024-11-20T22:26:10,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/9f9507440b294e7a8d5f18bb712e8a29 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9f9507440b294e7a8d5f18bb712e8a29 2024-11-20T22:26:10,807 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9f9507440b294e7a8d5f18bb712e8a29, entries=150, sequenceid=135, filesize=11.8 K 2024-11-20T22:26:10,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 1d38cbecc23f382ec5e2809846caa111 in 297ms, sequenceid=135, compaction requested=false 2024-11-20T22:26:10,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:26:10,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:10,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:26:10,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:10,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:10,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:10,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:10,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120008e1ecf3f514b9dbfd4052556802f87_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:10,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141630891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141630892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141630893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141630896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T22:26:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:10,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
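Note on the repeated RegionTooBusyException entries above: they come from HRegion.checkResources rejecting writes once the region memstore passes its blocking limit (here 512.0 K), and the stock client retries them internally, as the RpcRetryingCallerImpl line further down (tries=6, retries=16) shows. The following is a minimal Java sketch of the kind of single-row write the test's writer threads issue against this table, with an explicit retry loop added only for illustration; the table, row, family and qualifier names are taken from the log, while the value, loop bounds and backoff are assumptions, not the test tool's actual settings.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // The standard client already retries busy-region errors internally;
            // this explicit loop only makes that behaviour visible.
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    // When the region is blocked, the root cause is the
                    // RegionTooBusyException ("Over memstore limit") logged above;
                    // back off briefly and try again.
                    Thread.sleep(100L * (attempt + 1));
                }
            }
        }
    }
}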
2024-11-20T22:26:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:10,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141630910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:10,914 DEBUG [Thread-2577 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., hostname=6365a1e51efd,46811,1732141422048, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:26:10,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742445_1621 (size=12304) 2024-11-20T22:26:10,920 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:10,925 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120008e1ecf3f514b9dbfd4052556802f87_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120008e1ecf3f514b9dbfd4052556802f87_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:10,926 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8f72332cce07408085c59b0d5fa2a10c, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:10,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8f72332cce07408085c59b0d5fa2a10c is 175, key is test_row_0/A:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:10,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742446_1622 (size=31105) 2024-11-20T22:26:10,942 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=163, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8f72332cce07408085c59b0d5fa2a10c 2024-11-20T22:26:10,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/16404ac2e68b45bc97bae042ef2cad4d is 50, key is test_row_0/B:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:10,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742447_1623 (size=12151) 2024-11-20T22:26:10,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=163 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/16404ac2e68b45bc97bae042ef2cad4d 2024-11-20T22:26:10,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/f9b683d66c0b407cab958c035b8f8b17 is 50, key is test_row_0/C:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:10,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:10,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141630998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141630998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141630999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141631000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742448_1624 (size=12151) 2024-11-20T22:26:11,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/f9b683d66c0b407cab958c035b8f8b17 2024-11-20T22:26:11,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/8f72332cce07408085c59b0d5fa2a10c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c 2024-11-20T22:26:11,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c, entries=150, sequenceid=163, filesize=30.4 K 2024-11-20T22:26:11,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/16404ac2e68b45bc97bae042ef2cad4d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/16404ac2e68b45bc97bae042ef2cad4d 2024-11-20T22:26:11,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/16404ac2e68b45bc97bae042ef2cad4d, entries=150, sequenceid=163, filesize=11.9 K 2024-11-20T22:26:11,027 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/f9b683d66c0b407cab958c035b8f8b17 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f9b683d66c0b407cab958c035b8f8b17 2024-11-20T22:26:11,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f9b683d66c0b407cab958c035b8f8b17, entries=150, sequenceid=163, filesize=11.9 K 2024-11-20T22:26:11,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 1d38cbecc23f382ec5e2809846caa111 in 159ms, sequenceid=163, compaction requested=true 2024-11-20T22:26:11,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:11,041 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:11,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:11,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:11,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:11,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:11,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:11,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:11,043 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:11,043 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:11,044 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
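The compaction selection just logged is driven by store-file counts: SortedCompactionPolicy reports 3 eligible files against a blocking limit of 16, and ExploringCompactionPolicy then picks all 3 (size 101999) for a minor compaction of 1d38cbecc23f382ec5e2809846caa111/A. A small sketch of the configuration keys behind those numbers follows; the values shown are the usual defaults, given for illustration rather than read from this test's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is
        // considered -- the "3 store files ... 3 eligible" reported above.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound on the number of files merged in one compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which writes to the store are blocked -- the
        // "16 blocking" figure in the SortedCompactionPolicy line.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction threshold = "
            + conf.getInt("hbase.hstore.compactionThreshold", -1));
    }
}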
2024-11-20T22:26:11,044 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/66646e129ec9488397903b651e13d29f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=99.6 K 2024-11-20T22:26:11,044 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:11,044 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/66646e129ec9488397903b651e13d29f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c] 2024-11-20T22:26:11,044 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66646e129ec9488397903b651e13d29f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732141569549 2024-11-20T22:26:11,045 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2a60e9704434218adb124cb277401a3, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732141570181 2024-11-20T22:26:11,045 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f72332cce07408085c59b0d5fa2a10c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141570571 2024-11-20T22:26:11,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:26:11,054 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/4d3ec0b3670b428a91c93855d447548f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/4d3ec0b3670b428a91c93855d447548f 2024-11-20T22:26:11,057 DEBUG 
[RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:11,058 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:11,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,062 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 4d3ec0b3670b428a91c93855d447548f(size=12.0 K), total size for store is 35.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
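The per-family FLUSHING TO DISK lines above are the memstore being written out for stores A, B and C, and the earlier "Over memstore limit=512.0 K" rejections are HRegion.checkResources blocking writes once the region memstore exceeds its blocking size, which is the configured flush size multiplied by the block multiplier. A brief sketch of the two keys involved; the values shown are the stock defaults, not this test's smaller settings, which are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush is requested (stock default: 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Multiplier applied to the flush size to get the blocking limit whose
        // breach produces the RegionTooBusyException entries above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("blocking memstore size = " + blocking + " bytes");
    }
}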
2024-11-20T22:26:11,062 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:11,062 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141570448; duration=0sec 2024-11-20T22:26:11,062 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:11,062 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:11,062 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:11,063 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:11,064 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:11,064 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:11,064 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
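The FlushTableProcedure (pid=159) with its FlushRegionProcedure child (pid=160), which the master keeps polling above ("Checking to see if procedure is done pid=159"), is the master-side form of an administrative flush request against this table. The sketch below shows how such a request is issued through the client Admin API; it assumes default connection settings and is illustrative only, not a claim about exactly how this test triggered pid=159.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Request a flush of every region of the table; on builds with the
            // master-side flush procedure this is tracked as a FlushTableProcedure
            // with per-region children, like pid=159/pid=160 in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}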
2024-11-20T22:26:11,064 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d619563bbb2c49c6950bb8aa7dd335af, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e4ecebae8e4246178d5103e10d713074, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/16404ac2e68b45bc97bae042ef2cad4d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=35.6 K 2024-11-20T22:26:11,064 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d619563bbb2c49c6950bb8aa7dd335af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732141569549 2024-11-20T22:26:11,065 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e4ecebae8e4246178d5103e10d713074, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732141570181 2024-11-20T22:26:11,065 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 16404ac2e68b45bc97bae042ef2cad4d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141570571 2024-11-20T22:26:11,068 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:11,087 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120341f51c9dead4b04bd13b25773b13415_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:11,089 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120341f51c9dead4b04bd13b25773b13415_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:11,090 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120341f51c9dead4b04bd13b25773b13415_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:11,106 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:11,106 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/92cab52edaec436282660c0d36374ba4 is 50, key is test_row_0/B:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:11,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120506fb630728248d8bae7ee2e867a2a8a_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141570892/Put/seqid=0 2024-11-20T22:26:11,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742449_1625 (size=4469) 2024-11-20T22:26:11,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742451_1627 (size=12304) 2024-11-20T22:26:11,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:11,181 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120506fb630728248d8bae7ee2e867a2a8a_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120506fb630728248d8bae7ee2e867a2a8a_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/94043d1049f34c278252c61e69d0f3f0, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/94043d1049f34c278252c61e69d0f3f0 is 175, key is test_row_0/A:col10/1732141570892/Put/seqid=0 2024-11-20T22:26:11,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742452_1628 (size=31105) 2024-11-20T22:26:11,199 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=15.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/94043d1049f34c278252c61e69d0f3f0 2024-11-20T22:26:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742450_1626 (size=12493) 2024-11-20T22:26:11,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:11,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:11,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/644f8e4bd4bb4a538faf446c85717f21 is 50, key is test_row_0/B:col10/1732141570892/Put/seqid=0 2024-11-20T22:26:11,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742453_1629 (size=12151) 2024-11-20T22:26:11,243 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/644f8e4bd4bb4a538faf446c85717f21 2024-11-20T22:26:11,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141631247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141631247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/282864c1250040d0b0115a5552dd7b93 is 50, key is test_row_0/C:col10/1732141570892/Put/seqid=0 2024-11-20T22:26:11,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141631249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141631251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742454_1630 (size=12151) 2024-11-20T22:26:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:26:11,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141631352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141631354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141631356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141631357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141631557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141631561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141631562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141631562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,573 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#525 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:11,573 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/18bab7be2dd44b439813bf34248140f9 is 175, key is test_row_0/A:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:11,604 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/92cab52edaec436282660c0d36374ba4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/92cab52edaec436282660c0d36374ba4 2024-11-20T22:26:11,610 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into 92cab52edaec436282660c0d36374ba4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
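The compaction selection recorded just above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"; ExploringCompactionPolicy picking 3 files of size 36493) is driven by the store's compaction thresholds. Below is a minimal sketch of how such thresholds are typically set, assuming the standard HBase keys (hbase.hstore.compaction.min/max, hbase.hstore.blockingStoreFiles, hbase.hstore.compaction.ratio); the class name and values are illustrative, not the configuration this particular test run used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  // Illustrative values only; the keys are assumed to be the standard
  // store/compaction settings behind the selection logged above.
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // fewest files per minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // most files per minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // matches "16 blocking" in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio used by ExploringCompactionPolicy
    return conf;
  }
}
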
2024-11-20T22:26:11,610 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:11,610 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141571041; duration=0sec 2024-11-20T22:26:11,611 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:11,611 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:11,611 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:11,611 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:11,612 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:11,612 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:11,612 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/4d3ec0b3670b428a91c93855d447548f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9f9507440b294e7a8d5f18bb712e8a29, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f9b683d66c0b407cab958c035b8f8b17] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=35.6 K 2024-11-20T22:26:11,612 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d3ec0b3670b428a91c93855d447548f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732141569549 2024-11-20T22:26:11,612 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f9507440b294e7a8d5f18bb712e8a29, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732141570181 2024-11-20T22:26:11,613 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting f9b683d66c0b407cab958c035b8f8b17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141570571 2024-11-20T22:26:11,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 
is added to blk_1073742455_1631 (size=31447) 2024-11-20T22:26:11,627 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/18bab7be2dd44b439813bf34248140f9 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/18bab7be2dd44b439813bf34248140f9 2024-11-20T22:26:11,632 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into 18bab7be2dd44b439813bf34248140f9(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:11,632 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:11,632 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141571041; duration=0sec 2024-11-20T22:26:11,632 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:11,632 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:11,636 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:11,637 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/51adb3186946450385d89fb63b023bb4 is 50, key is test_row_0/C:col10/1732141570881/Put/seqid=0 2024-11-20T22:26:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742456_1632 (size=12493) 2024-11-20T22:26:11,699 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/282864c1250040d0b0115a5552dd7b93 2024-11-20T22:26:11,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/94043d1049f34c278252c61e69d0f3f0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0 2024-11-20T22:26:11,707 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/51adb3186946450385d89fb63b023bb4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/51adb3186946450385d89fb63b023bb4 2024-11-20T22:26:11,713 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0, entries=150, sequenceid=173, filesize=30.4 K 2024-11-20T22:26:11,714 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 51adb3186946450385d89fb63b023bb4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
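The PressureAwareThroughputController lines above report a current compaction throughput limit of 50.00 MB/second with no throttling applied. A hedged sketch of the two bounds that usually govern that limit follows; the property names (hbase.hstore.compaction.throughput.lower.bound / higher.bound) are assumed to be the standard keys, and the values are illustrative rather than taken from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  // The pressure-aware controller scales the allowed compaction throughput
  // between a lower and a higher bound; with zero pressure the limit sits at
  // the lower bound, consistent with the 50.00 MB/second reported above.
  public static Configuration throttledConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    return conf;
  }
}
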
2024-11-20T22:26:11,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/644f8e4bd4bb4a538faf446c85717f21 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/644f8e4bd4bb4a538faf446c85717f21 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:11,714 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141571042; duration=0sec 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:11,714 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:11,720 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/644f8e4bd4bb4a538faf446c85717f21, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:26:11,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/282864c1250040d0b0115a5552dd7b93 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/282864c1250040d0b0115a5552dd7b93 2024-11-20T22:26:11,727 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/282864c1250040d0b0115a5552dd7b93, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T22:26:11,733 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 1d38cbecc23f382ec5e2809846caa111 in 675ms, sequenceid=173, compaction requested=false 2024-11-20T22:26:11,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:11,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:11,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-20T22:26:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-20T22:26:11,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-20T22:26:11,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 981 msec 2024-11-20T22:26:11,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 986 msec 2024-11-20T22:26:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T22:26:11,855 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T22:26:11,856 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:11,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-20T22:26:11,857 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:11,858 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:11,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:26:11,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:11,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T22:26:11,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:11,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:11,869 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:11,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:11,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141631870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141631871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141631872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c0d040d3441946e29aacca5ffbdff998_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:11,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141631873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742457_1633 (size=14794) 2024-11-20T22:26:11,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:26:11,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141631974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141631975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141631975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:11,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:11,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141631977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,010 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
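Most of the warnings above are RegionTooBusyException responses returned to Mutate calls while the region's memstore sits over its 512.0 K blocking limit, and the second flush attempt (pid=161/162) fails with "Unable to complete flush" because the region is already flushing. The sketch below shows the client-side view, assuming the table, row, family, and qualifier seen in the log (TestAcidGuarantees, test_row_0, A, col10) and a placeholder value; in practice the stock HBase client retries this exception with backoff on its own, and depending on the client version it may surface wrapped in a retries-exhausted exception rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // The stock client retries RegionTooBusyException on its own; this loop
      // only makes that backoff visible for illustration.
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e; // give up after a few attempts
          }
          Thread.sleep(100L * (attempt + 1)); // simple linear backoff
        }
      }
    }
  }
}

The client-initiated flushes recorded earlier (procId 159 and 161) would typically be issued from the same connection as Admin.flush(TableName.valueOf("TestAcidGuarantees")).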
2024-11-20T22:26:12,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:26:12,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141632177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141632178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141632180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141632182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,282 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:12,285 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c0d040d3441946e29aacca5ffbdff998_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c0d040d3441946e29aacca5ffbdff998_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:12,286 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0fe50a8a090f4693a3f29aae68cd9ad6, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:12,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0fe50a8a090f4693a3f29aae68cd9ad6 is 175, key is test_row_0/A:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:12,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742458_1634 (size=39749) 2024-11-20T22:26:12,315 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 
{event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:26:12,467 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:12,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:12,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141632480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141632482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141632483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141632483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:12,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,690 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=206, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0fe50a8a090f4693a3f29aae68cd9ad6 2024-11-20T22:26:12,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/d4b3b31a6b1d4bc69d68d35edd6b8b99 is 50, key is test_row_0/B:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742459_1635 (size=12151) 2024-11-20T22:26:12,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/d4b3b31a6b1d4bc69d68d35edd6b8b99 2024-11-20T22:26:12,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d05273e6c64a4c6ca5db163758b4078f is 50, key is test_row_0/C:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:12,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742460_1636 (size=12151) 2024-11-20T22:26:12,774 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:12,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:12,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:12,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:12,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T22:26:12,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141632985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141632987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141632989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141632989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:13,078 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:13,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:13,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:13,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:13,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d05273e6c64a4c6ca5db163758b4078f 2024-11-20T22:26:13,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/0fe50a8a090f4693a3f29aae68cd9ad6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6 2024-11-20T22:26:13,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6, entries=200, sequenceid=206, filesize=38.8 K 2024-11-20T22:26:13,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/d4b3b31a6b1d4bc69d68d35edd6b8b99 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d4b3b31a6b1d4bc69d68d35edd6b8b99 2024-11-20T22:26:13,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d4b3b31a6b1d4bc69d68d35edd6b8b99, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T22:26:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d05273e6c64a4c6ca5db163758b4078f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d05273e6c64a4c6ca5db163758b4078f 2024-11-20T22:26:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d05273e6c64a4c6ca5db163758b4078f, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 1d38cbecc23f382ec5e2809846caa111 in 1266ms, sequenceid=206, compaction requested=true 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:13,133 
DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:13,133 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:13,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:13,134 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:13,134 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:13,134 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:13,134 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/92cab52edaec436282660c0d36374ba4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/644f8e4bd4bb4a538faf446c85717f21, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d4b3b31a6b1d4bc69d68d35edd6b8b99] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=35.9 K 2024-11-20T22:26:13,134 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/18bab7be2dd44b439813bf34248140f9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=99.9 K 2024-11-20T22:26:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,134 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:13,134 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/18bab7be2dd44b439813bf34248140f9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6] 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 92cab52edaec436282660c0d36374ba4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141570571 2024-11-20T22:26:13,135 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18bab7be2dd44b439813bf34248140f9, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141570571 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 644f8e4bd4bb4a538faf446c85717f21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141570890 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94043d1049f34c278252c61e69d0f3f0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141570890 2024-11-20T22:26:13,135 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d4b3b31a6b1d4bc69d68d35edd6b8b99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732141571247 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,135 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fe50a8a090f4693a3f29aae68cd9ad6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732141571247 2024-11-20T22:26:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,140 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,141 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#535 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,142 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/5d412837fb464756a10e728ddba96d71 is 50, key is test_row_0/B:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,143 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112067c7a93127ea47cbb44a19e464572b74_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
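
The entries above record the memstore flush for region 1d38cbecc23f382ec5e2809846caa111 completing and the CompactSplit thread queueing minor compactions for stores A, B and C, paced by the throughput controller at the 50.00 MB/second limit. For orientation only, here is a minimal client-side sketch of the equivalent administrative calls; it is not part of the test (TestAcidGuarantees drives flushing and compaction internally), the class name is illustrative, the table name is taken from the log, and the only API assumed is the standard HBase 2.x Admin interface (flush/compact by TableName):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative sketch only: shows the public Admin calls that correspond to
    // the MemStoreFlusher and CompactSplit activity logged above.
    public class FlushThenCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Flush memstores to new HFiles
          // (cf. "Finished flush of dataSize ~174.43 KB ... sequenceid=206" above).
          admin.flush(table);
          // Request a compaction of the flushed files
          // (cf. "Add compact mark for store ...:A/B/C" above).
          admin.compact(table);
        }
      }
    }

Whether and when the compaction actually runs, and which files it selects, is decided server-side by the compaction policy, as seen in the SortedCompactionPolicy/ExploringCompactionPolicy selection lines above.
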
2024-11-20T22:26:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,145 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112067c7a93127ea47cbb44a19e464572b74_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:13,146 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112067c7a93127ea47cbb44a19e464572b74_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742461_1637 (size=12595) 2024-11-20T22:26:13,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742462_1638 (size=4469) 2024-11-20T22:26:13,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,156 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#534 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:13,156 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f18d1b319f7a4fb8a4f110def36c73d3 is 175, key is test_row_0/A:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:13,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,162 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,165 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/5d412837fb464756a10e728ddba96d71 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/5d412837fb464756a10e728ddba96d71 2024-11-20T22:26:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,166 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,170 INFO 
[RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into 5d412837fb464756a10e728ddba96d71(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:13,170 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:13,170 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141573133; duration=0sec 2024-11-20T22:26:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,170 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:13,170 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:13,171 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,171 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:13,171 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:13,171 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,172 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/51adb3186946450385d89fb63b023bb4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/282864c1250040d0b0115a5552dd7b93, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d05273e6c64a4c6ca5db163758b4078f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=35.9 K 2024-11-20T22:26:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,172 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 51adb3186946450385d89fb63b023bb4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732141570571 2024-11-20T22:26:13,172 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 282864c1250040d0b0115a5552dd7b93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732141570890 2024-11-20T22:26:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,172 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting d05273e6c64a4c6ca5db163758b4078f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732141571247 2024-11-20T22:26:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742463_1639 (size=31549) 2024-11-20T22:26:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,179 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#536 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,180 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/203d8480191a47e9a2c44198d3ad12cf is 50, key is test_row_0/C:col10/1732141571864/Put/seqid=0 2024-11-20T22:26:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,182 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f18d1b319f7a4fb8a4f110def36c73d3 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f18d1b319f7a4fb8a4f110def36c73d3 2024-11-20T22:26:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,186 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,190 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742464_1640 (size=12595) 2024-11-20T22:26:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,197 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into f18d1b319f7a4fb8a4f110def36c73d3(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:13,197 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:13,197 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141573133; duration=0sec 2024-11-20T22:26:13,197 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:13,197 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:13,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,203 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/203d8480191a47e9a2c44198d3ad12cf as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/203d8480191a47e9a2c44198d3ad12cf 2024-11-20T22:26:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,209 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 203d8480191a47e9a2c44198d3ad12cf(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:13,209 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:13,209 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141573133; duration=0sec 2024-11-20T22:26:13,209 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:13,209 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,231 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:13,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T22:26:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:13,231 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T22:26:13,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:13,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:13,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:13,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:13,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120513698f9d050427190db8574d5644c10_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_1/A:col10/1732141571872/Put/seqid=0 2024-11-20T22:26:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T22:26:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:26:13,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742465_1641 (size=7324)
2024-11-20T22:26:13,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeated continuously from 2024-11-20T22:26:13,353 through 2024-11-20T22:26:13,419 by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 46811]
2024-11-20T22:26:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=46811) between 2024-11-20T22:26:13,573 and 2024-11-20T22:26:13,640 ...]
2024-11-20T22:26:13,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,685 DEBUG 
2024-11-20T22:26:13,687 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120513698f9d050427190db8574d5644c10_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120513698f9d050427190db8574d5644c10_1d38cbecc23f382ec5e2809846caa111
2024-11-20T22:26:13,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/4f675db09f5a45458f38724e5828e737, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111]
2024-11-20T22:26:13,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/4f675db09f5a45458f38724e5828e737 is 175, key is test_row_1/A:col10/1732141571872/Put/seqid=0
2024-11-20T22:26:13,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742466_1642 (size=13815)
2024-11-20T22:26:13,697 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/4f675db09f5a45458f38724e5828e737
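The RS_FLUSH_REGIONS records above are the region server flushing the memstores of the TestAcidGuarantees region to new store files. For reference, a flush like this can be requested from a client through the public Admin API; a minimal sketch, assuming a default client configuration on the classpath (connection details are illustrative and not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the region servers to flush every memstore of the table;
            // each region writes its in-memory data out as new store files,
            // much like the flush recorded in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}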
2024-11-20T22:26:13,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/ed08c773b6644bfdbba9e56bb8f065b5 is 50, key is test_row_1/B:col10/1732141571872/Put/seqid=0
2024-11-20T22:26:13,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742467_1643 (size=7365)
2024-11-20T22:26:13,711 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/ed08c773b6644bfdbba9e56bb8f065b5
2024-11-20T22:26:13,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/3f3ef7ee800244c890c423095608def5 is 50, key is test_row_1/C:col10/1732141571872/Put/seqid=0
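The flushed files carry cells keyed like test_row_1/A:col10, test_row_1/B:col10 and test_row_1/C:col10, i.e. one row written across the three column families of TestAcidGuarantees. A sketch of the kind of client write that produces such cells, assuming the standard HBase client API (the values and connection setup are illustrative, not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteTestRowSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One mutation that touches the same row in families A, B and C,
            // producing cells shaped like the test_row_1/A:col10 keys in the log.
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value-A"));
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value-B"));
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value-C"));
            table.put(put);
        }
    }
}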
2024-11-20T22:26:13,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742468_1644 (size=7365)
2024-11-20T22:26:13,736 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/3f3ef7ee800244c890c423095608def5
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/4f675db09f5a45458f38724e5828e737 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737 2024-11-20T22:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
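(Editor's note) The repeated RpcServer handler lines in this stretch all come from the single DEBUG statement above: StoreFileTrackerFactory resolving the tracker implementation for a store, which in this run is always DefaultStoreFileTracker. For reference only, the implementation is chosen from table or column-family configuration; the following is a minimal sketch of overriding it through the public client API. The "hbase.store.file-tracker.impl" key and the "FILE" value are assumptions taken from the documented store-file-tracking feature in recent HBase releases, not from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Copy the current descriptor and override the tracker implementation.
            // DEFAULT corresponds to the DefaultStoreFileTracker instantiated in the
            // log above; the key and the FILE value are assumptions based on the
            // documented feature, not on anything recorded here.
            TableDescriptor updated = TableDescriptorBuilder
                .newBuilder(admin.getDescriptor(table))
                .setValue("hbase.store.file-tracker.impl", "FILE")
                .build();
            admin.modifyTable(updated);
        }
    }
}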
2024-11-20T22:26:13,749 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737, entries=50, sequenceid=216, filesize=13.5 K
2024-11-20T22:26:13,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/ed08c773b6644bfdbba9e56bb8f065b5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/ed08c773b6644bfdbba9e56bb8f065b5
2024-11-20T22:26:13,752 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/ed08c773b6644bfdbba9e56bb8f065b5, entries=50, sequenceid=216, filesize=7.2 K
2024-11-20T22:26:13,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/3f3ef7ee800244c890c423095608def5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/3f3ef7ee800244c890c423095608def5
2024-11-20T22:26:13,756 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/3f3ef7ee800244c890c423095608def5, entries=50, sequenceid=216, filesize=7.2 K
2024-11-20T22:26:13,757 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 1d38cbecc23f382ec5e2809846caa111 in 526ms, sequenceid=216, compaction requested=false
2024-11-20T22:26:13,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111:
2024-11-20T22:26:13,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.
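(Editor's note) The records above trace one memstore flush of region 1d38cbecc23f382ec5e2809846caa111: temporary HFiles written under .tmp/ are committed into the A, B and C family directories, and the flush finishes after 526ms at sequenceid=216. For orientation only, a comparable flush can be requested through the public Admin API; this is a sketch under that assumption, not the code TestAcidGuarantees itself runs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Requests a flush of the table's regions. In the run logged here the
            // flush was driven by a FlushTableProcedure (pid=161) with a
            // FlushRegionProcedure child (pid=162); whether this call takes that
            // exact path depends on the HBase version.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}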
2024-11-20T22:26:13,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162
2024-11-20T22:26:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=162
2024-11-20T22:26:13,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161
2024-11-20T22:26:13,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9000 sec
2024-11-20T22:26:13,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.9040 sec
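(Editor's note) Apart from the procedure bookkeeping above, this stretch of the log is dominated by the same StoreFileTrackerFactory DEBUG line repeated by the RPC handlers on port 46811. If the trace were re-collected, one way to suppress that chatter, offered as an editorial suggestion rather than something the test does, is to raise the package logger above DEBUG through the Log4j2 core API:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class QuietStoreFileTrackerLogging {
    public static void main(String[] args) {
        // Raise the package logger above DEBUG so the per-request
        // "instantiating StoreFileTracker impl ..." lines are no longer emitted.
        Configurator.setLevel("org.apache.hadoop.hbase.regionserver.storefiletracker", Level.INFO);
    }
}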
2024-11-20T22:26:13,792 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,795 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,797 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,802 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,809 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,811 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,821 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,824 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,827 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,891 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,894 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,902 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,909 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:13,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
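The storefiletracker.StoreFileTrackerFactory(122) entries in this log are emitted whenever a store is opened and the factory resolves which StoreFileTracker implementation to use; DefaultStoreFileTracker is what a table gets when no tracker is configured explicitly. As a rough sketch only (the property key "hbase.store.file-tracker.impl", the FILE value, and the class/family names below are assumptions based on the stock HBase 2.5+ store-file-tracking feature, not taken from this log), a table descriptor can opt into a non-default tracker like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TrackerConfigSketch {
      public static void main(String[] args) {
        // Assumed config key: "hbase.store.file-tracker.impl" selects the
        // StoreFileTracker implementation (e.g. DEFAULT, FILE, or a class name).
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
            .setValue("hbase.store.file-tracker.impl", "FILE")
            .build();
        System.out.println(td); // the descriptor would then be passed to Admin.createTable(...)
      }
    }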
2024-11-20T22:26:13,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161
2024-11-20T22:26:13,962 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed
2024-11-20T22:26:13,963 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:26:13,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees
2024-11-20T22:26:13,965 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:26:13,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-11-20T22:26:13,966 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:26:13,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:26:14,004 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,007 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,010 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,013 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,017 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,020 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,025 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,030 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,037 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,051 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:14,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:14,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:14,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 
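The run of storefiletracker.StoreFileTrackerFactory(122) DEBUG records above shows each store operation resolving its tracker and falling back to DefaultStoreFileTracker. A minimal sketch of how that selection is normally driven by configuration follows; the property name "hbase.store.file-tracker.impl" and the value "DEFAULT" are assumptions inferred from the class names in the log, not taken from this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Build an HBase configuration (reads hbase-default.xml / hbase-site.xml if present).
    Configuration conf = HBaseConfiguration.create();
    // Assumed property name. Leaving it unset, or setting it to "DEFAULT", is the
    // kind of configuration that leads StoreFileTrackerFactory to instantiate
    // DefaultStoreFileTracker, as in the DEBUG records above.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("store file tracker = " + conf.get("hbase.store.file-tracker.impl"));
  }
}

The same property can presumably also be scoped to a single column family via the column family's configuration map rather than cluster-wide; the cluster-wide form shown here is only the simplest case.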
2024-11-20T22:26:14,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:14,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:14,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e22a67813294222beb34c5f9105898a_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:14,117 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:14,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:14,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
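The pid=164 failures above are the master's flush procedure being re-dispatched while region 1d38cbecc23f382ec5e2809846caa111 is still in the middle of an earlier flush ("NOT flushing ... as already flushing"), so each attempt ends in the same "Unable to complete flush" IOException until the in-flight flush finishes. As a rough illustration only, and not code taken from this test, the same condition can be exercised through the public client API by requesting a table flush and backing off on failure; the table name is copied from the log, while the class name, retry count and sleeps are assumptions:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          admin.flush(table); // asks the master to flush every region of the table
          break;              // request went through
        } catch (IOException e) {
          // the region may still be busy with a previous flush; wait and ask again
          Thread.sleep(1000L * attempt);
        }
      }
    }
  }
}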
2024-11-20T22:26:14,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742470_1646 (size=24758) 2024-11-20T22:26:14,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141634173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141634174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141634174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141634175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,270 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141634278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141634279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141634279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141634280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,423 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:14,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141634483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141634483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141634483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141634483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,549 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:14,552 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e22a67813294222beb34c5f9105898a_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e22a67813294222beb34c5f9105898a_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:14,553 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f012ea9a96cf418f98986dbafbcd2449, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:14,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f012ea9a96cf418f98986dbafbcd2449 is 175, key is test_row_0/A:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742469_1645 (size=74395) 2024-11-20T22:26:14,555 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=227, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f012ea9a96cf418f98986dbafbcd2449 2024-11-20T22:26:14,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/652ceb85d12f46ebb881c0c298f2dede is 50, key is test_row_0/B:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:14,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:14,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:14,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:14,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
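The recurring "Region is too busy due to exceeding memstore size limit" warnings in this stretch mean HRegion.checkResources() is rejecting Mutate calls with RegionTooBusyException while the region's memstore sits above its blocking limit (512.0 K in this run). The sketch below is illustrative rather than taken from the test: it shows a writer handling that exception itself with a simple backoff, whereas a normal HBase client retries it internally. Only the table, row and column names are copied from the log; the class name, retry budget and payload are assumptions:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);          // rejected with RegionTooBusyException while the memstore is blocked
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}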
2024-11-20T22:26:14,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742471_1647 (size=12151) 2024-11-20T22:26:14,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/652ceb85d12f46ebb881c0c298f2dede 2024-11-20T22:26:14,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/0d373eb10d8c439496436238684c985d is 50, key is test_row_0/C:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:14,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742472_1648 (size=12151) 2024-11-20T22:26:14,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:14,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:14,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
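The 512.0 K figure in those warnings is the per-region blocking limit, derived from the configured memstore flush size multiplied by the block multiplier; once the flush of families A, B and C that is under way above completes, writes are accepted again. A minimal sketch of the two settings involved follows; the 128 KB flush size and 4x multiplier are assumptions chosen only because they reproduce the 512 K limit seen in this run, not values read from the test configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values for illustration: a 128 KB flush size with a 4x block
    // multiplier yields the 512 K blocking limit reported in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // writes see RegionTooBusyException above this
    System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
  }
}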
2024-11-20T22:26:14,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141634785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141634787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141634787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141634795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:14,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:14,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:14,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141634938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:14,941 DEBUG [Thread-2577 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8191 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., hostname=6365a1e51efd,46811,1732141422048, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:26:15,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/0d373eb10d8c439496436238684c985d 2024-11-20T22:26:15,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f012ea9a96cf418f98986dbafbcd2449 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449 2024-11-20T22:26:15,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449, entries=400, sequenceid=227, filesize=72.7 K 2024-11-20T22:26:15,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/652ceb85d12f46ebb881c0c298f2dede as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/652ceb85d12f46ebb881c0c298f2dede 2024-11-20T22:26:15,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/652ceb85d12f46ebb881c0c298f2dede, entries=150, sequenceid=227, filesize=11.9 K 2024-11-20T22:26:15,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/0d373eb10d8c439496436238684c985d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/0d373eb10d8c439496436238684c985d 2024-11-20T22:26:15,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/0d373eb10d8c439496436238684c985d, entries=150, sequenceid=227, filesize=11.9 K 2024-11-20T22:26:15,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 1d38cbecc23f382ec5e2809846caa111 in 958ms, sequenceid=227, compaction requested=true 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:15,020 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:15,020 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:15,030 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32111 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:15,030 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:15,030 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:15,030 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:15,030 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
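The two compaction selections above each report "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", and the exploring policy accepts all three candidates (32111 and 119759 bytes respectively). As a rough, self-contained illustration of that kind of size-ratio selection only (this is not the actual ExploringCompactionPolicy code; the class name and the minFiles/maxFiles/ratio parameters below are hypothetical):

import java.util.ArrayList;
import java.util.List;

// Hypothetical, simplified illustration of ratio-based compaction file selection.
// It only shows the kind of check such a policy applies before accepting a candidate set.
public final class SimpleCompactionSelector {

    // Accept the whole candidate list only if the count is within [minFiles, maxFiles]
    // and no single file is larger than `ratio` times the sum of the others.
    static List<Long> select(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        if (fileSizes.size() < minFiles || fileSizes.size() > maxFiles) {
            return new ArrayList<>();
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return new ArrayList<>(); // one file dominates; not worth rewriting the rest
            }
        }
        return new ArrayList<>(fileSizes);
    }

    public static void main(String[] args) {
        // Sizes summing to 32111 bytes, roughly the B-store candidates logged above.
        List<Long> candidates = List.of(12595L, 7372L, 12144L);
        System.out.println(select(candidates, 3, 10, 1.2)); // prints all three sizes
    }
}

With these inputs every file passes the ratio test, so the whole set is compacted, matching the "3 (all) file(s)" minor compactions recorded in this section.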
2024-11-20T22:26:15,031 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:15,031 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/5d412837fb464756a10e728ddba96d71, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/ed08c773b6644bfdbba9e56bb8f065b5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/652ceb85d12f46ebb881c0c298f2dede] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=31.4 K 2024-11-20T22:26:15,031 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f18d1b319f7a4fb8a4f110def36c73d3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=117.0 K 2024-11-20T22:26:15,031 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:15,031 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f18d1b319f7a4fb8a4f110def36c73d3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449] 2024-11-20T22:26:15,031 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d412837fb464756a10e728ddba96d71, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732141571247 2024-11-20T22:26:15,031 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f18d1b319f7a4fb8a4f110def36c73d3, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732141571247 2024-11-20T22:26:15,031 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting ed08c773b6644bfdbba9e56bb8f065b5, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732141571872 2024-11-20T22:26:15,031 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f675db09f5a45458f38724e5828e737, keycount=50, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732141571872 2024-11-20T22:26:15,031 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 652ceb85d12f46ebb881c0c298f2dede, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732141574037 2024-11-20T22:26:15,032 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f012ea9a96cf418f98986dbafbcd2449, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732141574022 2024-11-20T22:26:15,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
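The repeated pid=164 entries show the master re-dispatching a region flush procedure while the region server answers "NOT flushing ... as already flushing" and reports the failure back. Such master-driven flush procedures are typically what an administrative flush request turns into. A minimal client-side sketch of issuing one through the public API follows; FlushTableExample is a hypothetical name and the connection setup assumes an hbase-site.xml on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush a table. The master turns this into flush
// procedures dispatched to the region servers, similar to the FlushRegionCallable
// executions logged above for pid=164.
public final class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}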
2024-11-20T22:26:15,036 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:15,036 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#543 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:15,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:15,037 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/15704e48e2754280b3df5e69cdc93c98 is 50, key is test_row_0/B:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:15,043 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:15,067 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209795bb29246843808fd48e8f3a97681f_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:15,071 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209795bb29246843808fd48e8f3a97681f_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:15,071 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209795bb29246843808fd48e8f3a97681f_1d38cbecc23f382ec5e2809846caa111 
because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:15,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:15,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e5d9989b9dbe4c939411b1e183c4fb9f_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141574173/Put/seqid=0 2024-11-20T22:26:15,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742473_1649 (size=12697) 2024-11-20T22:26:15,078 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/15704e48e2754280b3df5e69cdc93c98 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/15704e48e2754280b3df5e69cdc93c98 2024-11-20T22:26:15,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742475_1651 (size=12304) 2024-11-20T22:26:15,083 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into 15704e48e2754280b3df5e69cdc93c98(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
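The recurring "RegionTooBusyException: Over memstore limit=512.0 K" warnings before and after this point are the region server refusing writes while the region's memstore sits above its blocking threshold, which is typically hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the tiny 512.0 K limit here presumably reflects the test's deliberately small memstore settings. The client keeps retrying with backoff, as the RpcRetryingCallerImpl entry with tries=7, retries=16 shows. The sketch below makes that pattern explicit on the client side; BusyRegionPutExample is a hypothetical name, and the retry count, sleep times, and cell value are arbitrary illustrative choices, not HBase defaults.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: write to the test table and back off when the region
// reports memstore pressure (a RegionTooBusyException, possibly surfaced only after
// the client's own internal retries are exhausted).
public final class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        // Real client retry keys; the values here are examples, not recommendations.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException busy) {
                    // Region still over its memstore blocking limit; wait for the flush to catch up.
                    Thread.sleep(200L * attempt);
                }
            }
        }
    }
}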
2024-11-20T22:26:15,083 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:15,083 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141575020; duration=0sec 2024-11-20T22:26:15,083 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:15,083 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:15,083 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:15,084 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32111 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:15,084 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:15,084 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:15,084 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/203d8480191a47e9a2c44198d3ad12cf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/3f3ef7ee800244c890c423095608def5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/0d373eb10d8c439496436238684c985d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=31.4 K 2024-11-20T22:26:15,084 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 203d8480191a47e9a2c44198d3ad12cf, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732141571247 2024-11-20T22:26:15,085 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f3ef7ee800244c890c423095608def5, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732141571872 2024-11-20T22:26:15,085 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d373eb10d8c439496436238684c985d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732141574037 2024-11-20T22:26:15,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is 
added to blk_1073742474_1650 (size=4469) 2024-11-20T22:26:15,091 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#546 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:15,092 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/267ade1bdf364a32ace9f4ce770e719e is 50, key is test_row_0/C:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742476_1652 (size=12697) 2024-11-20T22:26:15,112 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/267ade1bdf364a32ace9f4ce770e719e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/267ade1bdf364a32ace9f4ce770e719e 2024-11-20T22:26:15,117 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 267ade1bdf364a32ace9f4ce770e719e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:15,117 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:15,117 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141575020; duration=0sec 2024-11-20T22:26:15,117 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:15,117 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:15,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:15,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141635302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141635304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141635305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141635304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141635407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141635409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141635410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141635415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:15,499 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#544 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:15,500 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f41423be1a1e4773938b6ccfdf869166 is 175, key is test_row_0/A:col10/1732141574052/Put/seqid=0 2024-11-20T22:26:15,502 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e5d9989b9dbe4c939411b1e183c4fb9f_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e5d9989b9dbe4c939411b1e183c4fb9f_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:15,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/a56ebe9bf18649238fb1bd7ab76b9a66, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:15,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/a56ebe9bf18649238fb1bd7ab76b9a66 is 175, key is test_row_0/A:col10/1732141574173/Put/seqid=0 2024-11-20T22:26:15,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742477_1653 (size=31651) 2024-11-20T22:26:15,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742478_1654 (size=31105) 2024-11-20T22:26:15,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141635610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141635614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141635615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141635620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141635913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141635918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141635919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:15,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141635926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:15,932 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f41423be1a1e4773938b6ccfdf869166 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f41423be1a1e4773938b6ccfdf869166 2024-11-20T22:26:15,936 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into f41423be1a1e4773938b6ccfdf869166(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:15,936 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:15,936 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141575020; duration=0sec 2024-11-20T22:26:15,936 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:15,936 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:15,940 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/a56ebe9bf18649238fb1bd7ab76b9a66 2024-11-20T22:26:15,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/71298ff697c249728c331463d673d03f is 50, key is test_row_0/B:col10/1732141574173/Put/seqid=0 2024-11-20T22:26:15,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742479_1655 (size=12151) 2024-11-20T22:26:15,960 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/71298ff697c249728c331463d673d03f 2024-11-20T22:26:15,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/38897f5cbe8c4138ad59d3a1470972be is 50, key is test_row_0/C:col10/1732141574173/Put/seqid=0 2024-11-20T22:26:15,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742480_1656 (size=12151) 2024-11-20T22:26:15,973 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/38897f5cbe8c4138ad59d3a1470972be 2024-11-20T22:26:15,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/a56ebe9bf18649238fb1bd7ab76b9a66 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66 2024-11-20T22:26:15,982 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66, entries=150, sequenceid=253, filesize=30.4 K 2024-11-20T22:26:15,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T22:26:15,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/71298ff697c249728c331463d673d03f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/71298ff697c249728c331463d673d03f 2024-11-20T22:26:15,991 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/71298ff697c249728c331463d673d03f, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T22:26:15,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/38897f5cbe8c4138ad59d3a1470972be as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/38897f5cbe8c4138ad59d3a1470972be 2024-11-20T22:26:15,996 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/38897f5cbe8c4138ad59d3a1470972be, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T22:26:15,997 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 1d38cbecc23f382ec5e2809846caa111 in 961ms, sequenceid=253, compaction requested=false 2024-11-20T22:26:15,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:15,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:15,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-20T22:26:15,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-20T22:26:16,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T22:26:16,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0340 sec 2024-11-20T22:26:16,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.0380 sec 2024-11-20T22:26:16,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T22:26:16,074 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-20T22:26:16,075 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:16,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-20T22:26:16,076 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:16,077 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:16,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:16,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T22:26:16,228 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:16,229 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:16,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:16,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201418b487e40647fe80a31f7a5828e3ea_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141575302/Put/seqid=0 2024-11-20T22:26:16,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742481_1657 (size=12454) 2024-11-20T22:26:16,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,255 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201418b487e40647fe80a31f7a5828e3ea_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201418b487e40647fe80a31f7a5828e3ea_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:16,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/cd96416ff4f04e1185c34649d53c6086, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:16,256 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/cd96416ff4f04e1185c34649d53c6086 is 175, key is test_row_0/A:col10/1732141575302/Put/seqid=0 2024-11-20T22:26:16,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742482_1658 (size=31255) 2024-11-20T22:26:16,262 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=266, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/cd96416ff4f04e1185c34649d53c6086 2024-11-20T22:26:16,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/c96084fa83f64d92991442b9b3310b30 is 50, key is test_row_0/B:col10/1732141575302/Put/seqid=0 2024-11-20T22:26:16,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742483_1659 (size=12301) 2024-11-20T22:26:16,285 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/c96084fa83f64d92991442b9b3310b30 2024-11-20T22:26:16,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/cd876b76201e4e5dbf7bc313808d7af6 is 50, key is test_row_0/C:col10/1732141575302/Put/seqid=0 2024-11-20T22:26:16,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742484_1660 (size=12301) 2024-11-20T22:26:16,325 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/cd876b76201e4e5dbf7bc313808d7af6 2024-11-20T22:26:16,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/cd96416ff4f04e1185c34649d53c6086 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086 2024-11-20T22:26:16,338 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086, entries=150, sequenceid=266, filesize=30.5 K 2024-11-20T22:26:16,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/c96084fa83f64d92991442b9b3310b30 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c96084fa83f64d92991442b9b3310b30 2024-11-20T22:26:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,343 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c96084fa83f64d92991442b9b3310b30, entries=150, sequenceid=266, filesize=12.0 K 2024-11-20T22:26:16,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/cd876b76201e4e5dbf7bc313808d7af6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/cd876b76201e4e5dbf7bc313808d7af6 2024-11-20T22:26:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T22:26:16,355 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/cd876b76201e4e5dbf7bc313808d7af6, entries=150, sequenceid=266, filesize=12.0 K
2024-11-20T22:26:16,355 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 1d38cbecc23f382ec5e2809846caa111 in 126ms, sequenceid=266, compaction requested=true
2024-11-20T22:26:16,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111:
2024-11-20T22:26:16,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.
2024-11-20T22:26:16,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166
2024-11-20T22:26:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=166
2024-11-20T22:26:16,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165
2024-11-20T22:26:16,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 280 msec
2024-11-20T22:26:16,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 284 msec
2024-11-20T22:26:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165
2024-11-20T22:26:16,379 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed
2024-11-20T22:26:16,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T22:26:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees
2024-11-20T22:26:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-11-20T22:26:16,385 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T22:26:16,385 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T22:26:16,386 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T22:26:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:16,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:16,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:16,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:16,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:16,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:16,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e71f0f7222a4b8dbac3f9d9a8e93443_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:16,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin 
connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:16,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742486_1662 (size=25158) 2024-11-20T22:26:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:16,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:16,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:16,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141636622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141636624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141636624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141636625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:16,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:16,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:16,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:16,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:16,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:16,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141636729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141636733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141636735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141636735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,850 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:16,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:16,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:16,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:16,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:16,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141636937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,940 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:16,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141636939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141636939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:16,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141636940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:16,943 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e71f0f7222a4b8dbac3f9d9a8e93443_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e71f0f7222a4b8dbac3f9d9a8e93443_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:16,944 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f4cdec10fa904e6fb5c2a375446bbc60, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:16,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f4cdec10fa904e6fb5c2a375446bbc60 is 175, key is test_row_0/A:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:16,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742485_1661 (size=74795) 2024-11-20T22:26:16,947 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f4cdec10fa904e6fb5c2a375446bbc60 2024-11-20T22:26:16,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e06e2fb19c454925be587d6d49dbb523 is 50, key is test_row_0/B:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:16,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742487_1663 (size=12301) 2024-11-20T22:26:16,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e06e2fb19c454925be587d6d49dbb523 2024-11-20T22:26:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:17,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/df909c141f6041cab013a5486e992ac4 is 50, key is test_row_0/C:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:17,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:17,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:17,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:17,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:17,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:17,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:17,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742488_1664 (size=12301) 2024-11-20T22:26:17,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/df909c141f6041cab013a5486e992ac4 2024-11-20T22:26:17,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/f4cdec10fa904e6fb5c2a375446bbc60 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60 2024-11-20T22:26:17,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60, entries=400, sequenceid=277, filesize=73.0 K 2024-11-20T22:26:17,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e06e2fb19c454925be587d6d49dbb523 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e06e2fb19c454925be587d6d49dbb523 2024-11-20T22:26:17,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e06e2fb19c454925be587d6d49dbb523, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T22:26:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/df909c141f6041cab013a5486e992ac4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/df909c141f6041cab013a5486e992ac4 2024-11-20T22:26:17,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/df909c141f6041cab013a5486e992ac4, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T22:26:17,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 1d38cbecc23f382ec5e2809846caa111 in 534ms, sequenceid=277, compaction requested=true 
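Editor's note: the RegionTooBusyException entries above all report the same 512.0 K blocking limit. In HBase that per-region blocking threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The sketch below only works through that arithmetic; the 128 KB flush size and multiplier of 4 are assumptions inferred from the 512 K figure in the log, not values confirmed by it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test settings: 128 KB flush size with the default block multiplier of 4.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;
    // 128 KB * 4 = 524288 bytes, i.e. the "Over memstore limit=512.0 K" reported above;
    // writes to the region are rejected until a flush brings the memstore back under it.
    System.out.println("Writes block above " + blockingLimit + " bytes per region");
  }
}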
2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:17,024 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:17,024 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:17,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:17,026 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 168806 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:17,026 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:17,026 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:17,026 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:17,026 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:17,026 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
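Editor's note: the ExploringCompactionPolicy entries just above select all four eligible store files for A and B ("3 permutations with 3 in ratio"). As a rough illustration of that ratio test, here is a simplified sketch, not the actual ExploringCompactionPolicy code: a candidate set is "in ratio" when no file is larger than ratio times the combined size of the other files. The per-file sizes used below are the approximate A-family sizes reported in the Compactor lines that follow, and 1.2 is the stock compaction ratio.

import java.util.List;

public class RatioCheckSketch {
  // Simplified version of the size-ratio test behind HBase's exploring compaction
  // selection: every file in the candidate set must be no larger than
  // ratio * (sum of the other files). Illustration only.
  static boolean inRatio(List<Double> fileSizesKb, double ratio) {
    double total = fileSizesKb.stream().mapToDouble(Double::doubleValue).sum();
    for (double size : fileSizesKb) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate A-family store file sizes from the log: 30.9 K, 30.4 K, 30.5 K, 73.0 K
    // (totalSize=164.8 K in the selection above).
    List<Double> sizesKb = List.of(30.9, 30.4, 30.5, 73.0);
    System.out.println("in ratio (1.2): " + inRatio(sizesKb, 1.2)); // prints true
  }
}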
2024-11-20T22:26:17,026 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f41423be1a1e4773938b6ccfdf869166, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=164.8 K 2024-11-20T22:26:17,026 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/15704e48e2754280b3df5e69cdc93c98, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/71298ff697c249728c331463d673d03f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c96084fa83f64d92991442b9b3310b30, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e06e2fb19c454925be587d6d49dbb523] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=48.3 K 2024-11-20T22:26:17,026 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:17,026 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f41423be1a1e4773938b6ccfdf869166, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60] 2024-11-20T22:26:17,027 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 15704e48e2754280b3df5e69cdc93c98, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732141574037 2024-11-20T22:26:17,027 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f41423be1a1e4773938b6ccfdf869166, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732141574037 2024-11-20T22:26:17,027 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a56ebe9bf18649238fb1bd7ab76b9a66, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141574168 2024-11-20T22:26:17,027 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 71298ff697c249728c331463d673d03f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141574168 2024-11-20T22:26:17,028 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd96416ff4f04e1185c34649d53c6086, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1732141575302 2024-11-20T22:26:17,028 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c96084fa83f64d92991442b9b3310b30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1732141575302 2024-11-20T22:26:17,028 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e06e2fb19c454925be587d6d49dbb523, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141576469 2024-11-20T22:26:17,028 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4cdec10fa904e6fb5c2a375446bbc60, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141576447 2024-11-20T22:26:17,049 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#555 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:17,049 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/89953b46632b48e3aa1425223f97dec0 is 50, key is test_row_0/B:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:17,064 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:17,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742489_1665 (size=12983) 2024-11-20T22:26:17,083 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112003372710be1d441c8f306d22e2dccae1_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:17,087 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112003372710be1d441c8f306d22e2dccae1_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:17,087 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112003372710be1d441c8f306d22e2dccae1_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:17,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742490_1666 (size=4469) 2024-11-20T22:26:17,101 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#556 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:17,108 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/5d17da605ef24ad788d6d5f2c3164311 is 175, key is test_row_0/A:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:17,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742491_1667 (size=31937) 2024-11-20T22:26:17,157 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:17,159 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:17,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f0943ba7d82f4f90bf5d47f546a5bcad_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141576623/Put/seqid=0 2024-11-20T22:26:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742492_1668 (size=12454) 2024-11-20T22:26:17,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:17,176 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f0943ba7d82f4f90bf5d47f546a5bcad_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f0943ba7d82f4f90bf5d47f546a5bcad_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:17,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/217947a2f8fd49d2ab0c5ff4b4bfd38c, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:17,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/217947a2f8fd49d2ab0c5ff4b4bfd38c is 175, key is test_row_0/A:col10/1732141576623/Put/seqid=0 2024-11-20T22:26:17,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742493_1669 (size=31255) 2024-11-20T22:26:17,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:17,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:17,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141637250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141637251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141637256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141637259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141637357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141637357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141637358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141637365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,482 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/89953b46632b48e3aa1425223f97dec0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/89953b46632b48e3aa1425223f97dec0 2024-11-20T22:26:17,485 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into 89953b46632b48e3aa1425223f97dec0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
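Editor's note: at this point the B-family compaction has rewritten four store files into a single 12.7 K file, and the same will happen for A and C below. For readers reproducing this outside the test harness, compactions can be requested and observed through the public Admin API; the snippet below is a generic sketch against the TestAcidGuarantees table, not something the test itself runs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table); // asynchronously queue a minor compaction for the table's regions
      CompactionState state = admin.getCompactionState(table);
      System.out.println("compaction state: " + state); // e.g. NONE, MINOR, MAJOR
    }
  }
}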
2024-11-20T22:26:17,485 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:17,485 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=12, startTime=1732141577024; duration=0sec 2024-11-20T22:26:17,485 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:17,485 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:17,485 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T22:26:17,486 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T22:26:17,486 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:17,486 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:17,487 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/267ade1bdf364a32ace9f4ce770e719e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/38897f5cbe8c4138ad59d3a1470972be, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/cd876b76201e4e5dbf7bc313808d7af6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/df909c141f6041cab013a5486e992ac4] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=48.3 K 2024-11-20T22:26:17,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:17,489 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 267ade1bdf364a32ace9f4ce770e719e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732141574037 2024-11-20T22:26:17,490 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 38897f5cbe8c4138ad59d3a1470972be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732141574168 2024-11-20T22:26:17,490 DEBUG 
[RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting cd876b76201e4e5dbf7bc313808d7af6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1732141575302 2024-11-20T22:26:17,490 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting df909c141f6041cab013a5486e992ac4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141576469 2024-11-20T22:26:17,496 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#558 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:17,497 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/bbd6513ff2ec41a391c2e45783f7ab78 is 50, key is test_row_0/C:col10/1732141576483/Put/seqid=0 2024-11-20T22:26:17,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742494_1670 (size=12983) 2024-11-20T22:26:17,527 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/5d17da605ef24ad788d6d5f2c3164311 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/5d17da605ef24ad788d6d5f2c3164311 2024-11-20T22:26:17,532 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into 5d17da605ef24ad788d6d5f2c3164311(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:17,533 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:17,533 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=12, startTime=1732141577024; duration=0sec 2024-11-20T22:26:17,533 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:17,533 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141637560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141637561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141637562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141637569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,619 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=302, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/217947a2f8fd49d2ab0c5ff4b4bfd38c 2024-11-20T22:26:17,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/601662cd68f748c3a652219730ffd08f is 50, key is test_row_0/B:col10/1732141576623/Put/seqid=0 2024-11-20T22:26:17,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742495_1671 (size=12301) 2024-11-20T22:26:17,629 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/601662cd68f748c3a652219730ffd08f 2024-11-20T22:26:17,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/c11ab57cdf4545008d4a453dcfc89dbb is 50, key is test_row_0/C:col10/1732141576623/Put/seqid=0 2024-11-20T22:26:17,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742496_1672 (size=12301) 2024-11-20T22:26:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141637865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141637865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141637869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:17,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141637873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:17,909 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/bbd6513ff2ec41a391c2e45783f7ab78 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/bbd6513ff2ec41a391c2e45783f7ab78 2024-11-20T22:26:17,914 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into bbd6513ff2ec41a391c2e45783f7ab78(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
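Editor's note: the repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting Mutate RPCs while the region's memstore is over its blocking limit (512.0 K here, presumably tuned down for this test); the condition clears once the flush and compactions seen in this log drain the memstore. The sketch below is a minimal, hypothetical client-side illustration of treating that exception as retryable with exponential backoff. It assumes the exception surfaces to the caller; in practice the stock HBase client already retries it internally (subject to hbase.client.retries.number and hbase.client.pause) and may rethrow it wrapped in a retries-exhausted exception. The table, row, family, and qualifier names are taken from this log; the retry constants are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  // Hypothetical retry settings, for illustration only.
  private static final int MAX_ATTEMPTS = 5;
  private static final long INITIAL_BACKOFF_MS = 100L;

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoff = INITIAL_BACKOFF_MS;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);        // may be rejected while the memstore is over its blocking limit
          break;                 // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= MAX_ATTEMPTS) {
            throw e;             // give up after a bounded number of attempts
          }
          Thread.sleep(backoff); // wait for flushes/compactions to relieve memstore pressure
          backoff *= 2;          // simple exponential backoff
        }
      }
    }
  }
}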
2024-11-20T22:26:17,914 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:17,914 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=12, startTime=1732141577024; duration=0sec 2024-11-20T22:26:17,914 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:17,914 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:18,040 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/c11ab57cdf4545008d4a453dcfc89dbb 2024-11-20T22:26:18,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/217947a2f8fd49d2ab0c5ff4b4bfd38c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c 2024-11-20T22:26:18,052 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c, entries=150, sequenceid=302, filesize=30.5 K 2024-11-20T22:26:18,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/601662cd68f748c3a652219730ffd08f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/601662cd68f748c3a652219730ffd08f 2024-11-20T22:26:18,057 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/601662cd68f748c3a652219730ffd08f, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T22:26:18,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 
{event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/c11ab57cdf4545008d4a453dcfc89dbb as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/c11ab57cdf4545008d4a453dcfc89dbb 2024-11-20T22:26:18,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,071 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/c11ab57cdf4545008d4a453dcfc89dbb, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T22:26:18,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,076 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1d38cbecc23f382ec5e2809846caa111 in 917ms, sequenceid=302, compaction requested=false 2024-11-20T22:26:18,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:18,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
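Editor's note: the flush that just finished (sequenceid=302) was driven by the master's FlushTableProcedure (pid=167) and its per-region FlushRegionProcedure (pid=168), whose completion is logged just below. For reference, a client can request the same table-wide flush, and a follow-up compaction, through the Admin API. The sketch below is only a minimal illustration using the table name from this test, not a reproduction of how TestAcidGuarantees itself drives these procedures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; on this branch the
      // master runs it as a flush procedure like pid=167 above.
      admin.flush(table);

      // Optionally queue a compaction afterwards. This only requests the work;
      // the region server's CompactSplit threads ("longCompactions" /
      // "shortCompactions" in this log) perform it asynchronously.
      admin.compact(table);
    }
  }
}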
2024-11-20T22:26:18,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-20T22:26:18,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-20T22:26:18,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T22:26:18,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6930 sec 2024-11-20T22:26:18,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.6990 sec 2024-11-20T22:26:18,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T22:26:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:18,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:18,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:18,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:18,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f9b58a188d4e4488a2d9334253c32193_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742497_1673 (size=12454) 2024-11-20T22:26:18,413 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:18,417 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f9b58a188d4e4488a2d9334253c32193_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9b58a188d4e4488a2d9334253c32193_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:18,418 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/3433f38637cc4638ad8200c3cc8fdc76, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:18,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/3433f38637cc4638ad8200c3cc8fdc76 is 175, key is test_row_0/A:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:18,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141638428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141638430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141638427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141638430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742498_1674 (size=31251) 2024-11-20T22:26:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T22:26:18,489 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T22:26:18,491 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-20T22:26:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:18,493 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:18,493 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:18,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:18,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141638534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141638534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141638534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141638534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:18,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:18,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:18,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:18,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:18,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141638745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141638745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141638745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:18,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141638748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:18,799 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:18,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,847 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/3433f38637cc4638ad8200c3cc8fdc76 2024-11-20T22:26:18,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/550e3fdbd0b54d368e6697d80a199569 is 50, key is test_row_0/B:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:18,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742499_1675 (size=9857) 2024-11-20T22:26:18,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:18,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:18,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:19,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141639049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141639050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141639053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141639053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:19,108 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:19,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:19,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:19,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:19,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/550e3fdbd0b54d368e6697d80a199569 2024-11-20T22:26:19,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:19,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:19,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:19,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:19,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
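Note on the block above: the region server keeps rejecting the master's flush request for pid=170 with "Unable to complete flush ... as already flushing", and the master re-dispatches the FlushRegionProcedure until the in-flight flush drains (it finally succeeds further down, at 22:26:19,999). For orientation only, here is a minimal client-side sketch of the call that produces such a procedure chain on the master; the table name is taken from the log, while the connection boilerplate is generic and not part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. On the master this
      // fans out into a FlushTableProcedure with one FlushRegionProcedure per
      // region, which is what pid=169 / pid=170 in the log correspond to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

That single call is why the master keeps logging "Checking to see if procedure is done pid=169" while the region-level subprocedure (pid=170) is being retried.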
2024-11-20T22:26:19,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/8b3cf8c29fa147e292d0eca94cf452df is 50, key is test_row_0/C:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742500_1676 (size=9857) 2024-11-20T22:26:19,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/8b3cf8c29fa147e292d0eca94cf452df 2024-11-20T22:26:19,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/3433f38637cc4638ad8200c3cc8fdc76 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76 2024-11-20T22:26:19,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76, entries=150, sequenceid=317, filesize=30.5 K 2024-11-20T22:26:19,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/550e3fdbd0b54d368e6697d80a199569 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/550e3fdbd0b54d368e6697d80a199569 2024-11-20T22:26:19,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/550e3fdbd0b54d368e6697d80a199569, entries=100, sequenceid=317, filesize=9.6 K 2024-11-20T22:26:19,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/8b3cf8c29fa147e292d0eca94cf452df as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/8b3cf8c29fa147e292d0eca94cf452df 2024-11-20T22:26:19,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/8b3cf8c29fa147e292d0eca94cf452df, entries=100, sequenceid=317, filesize=9.6 K 2024-11-20T22:26:19,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1d38cbecc23f382ec5e2809846caa111 in 948ms, sequenceid=317, 
compaction requested=true 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:19,325 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T22:26:19,326 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:19,326 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:19,326 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
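The flush above completed with "compaction requested=true", and the MemStoreFlusher then queued minor compactions for stores A, B and C; the SortedCompactionPolicy/ExploringCompactionPolicy pass selected all 3 eligible files per store ("3 eligible, 16 blocking"). As a hedged sketch, these are the standard configuration knobs behind those numbers; the values shown are the stock defaults that happen to match the figures in the log, not settings read from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // requested; the "3 eligible" in the selection log line matches this default.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files one compaction request may include.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to a store are delayed once it accumulates this many files;
    // the "16 blocking" figure in the log is this setting.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}
```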
2024-11-20T22:26:19,326 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/5d17da605ef24ad788d6d5f2c3164311, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=92.2 K 2024-11-20T22:26:19,326 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,326 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/5d17da605ef24ad788d6d5f2c3164311, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76] 2024-11-20T22:26:19,326 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:19,327 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d17da605ef24ad788d6d5f2c3164311, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141576469 2024-11-20T22:26:19,327 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 217947a2f8fd49d2ab0c5ff4b4bfd38c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732141576613 2024-11-20T22:26:19,328 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3433f38637cc4638ad8200c3cc8fdc76, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732141577253 2024-11-20T22:26:19,328 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:19,328 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:19,328 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,328 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/89953b46632b48e3aa1425223f97dec0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/601662cd68f748c3a652219730ffd08f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/550e3fdbd0b54d368e6697d80a199569] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=34.3 K 2024-11-20T22:26:19,328 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 89953b46632b48e3aa1425223f97dec0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141576469 2024-11-20T22:26:19,329 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 601662cd68f748c3a652219730ffd08f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732141576613 2024-11-20T22:26:19,329 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 550e3fdbd0b54d368e6697d80a199569, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732141577253 2024-11-20T22:26:19,343 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:19,346 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#565 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:19,347 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e59352b827f64be0845276b28aa45500 is 50, key is test_row_0/B:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:19,356 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202060ec593c754390b7264721c28e04d4_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:19,358 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202060ec593c754390b7264721c28e04d4_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:19,358 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202060ec593c754390b7264721c28e04d4_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:19,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742501_1677 (size=13085) 2024-11-20T22:26:19,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742502_1678 (size=4469) 2024-11-20T22:26:19,399 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#564 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:19,400 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ddf66450ac5f4b76bbb62a1084edcf2c is 175, key is test_row_0/A:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:19,414 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T22:26:19,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
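Family A in this run is MOB-enabled, which is why its compaction goes through DefaultMobStoreCompactor and why a MOB writer is created and then aborted "because there are no MOB cells" (the cells written here are far below any MOB threshold). A minimal sketch of how such a family is declared, assuming the public descriptor-builder API; the threshold value is purely illustrative and not taken from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            // Values larger than the threshold are written to separate MOB files;
            // the small cells in this run stay inline, so the MOB writer above
            // is aborted with "there are no MOB cells".
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024L) // hypothetical threshold, for illustration only
            .build())
        .build();
  }
}
```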
2024-11-20T22:26:19,415 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:26:19,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:19,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:19,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:19,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:19,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742503_1679 (size=32146) 2024-11-20T22:26:19,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120548a9d8ff77945069fa2c22982d44073_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141578428/Put/seqid=0 2024-11-20T22:26:19,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742504_1680 (size=12454) 2024-11-20T22:26:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:19,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:19,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141639572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141639572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141639574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141639574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:19,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141639678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141639678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141639678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141639679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,799 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e59352b827f64be0845276b28aa45500 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e59352b827f64be0845276b28aa45500 2024-11-20T22:26:19,804 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into e59352b827f64be0845276b28aa45500(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
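The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region blocking new writes because its memstore has exceeded the blocking limit (the flush size times hbase.hregion.memstore.block.multiplier; the 512 KB figure suggests this test runs with a deliberately tiny flush size to force constant flushing). The stock HBase client treats this as a retriable exception and backs off internally; the sketch below only makes that backoff explicit for illustration, under the assumption that a bare Table.put is used, and the putWithRetry helper is hypothetical.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetry {
  // Minimal sketch: retry a put with exponential backoff when the region
  // reports it is over its memstore blocking limit.
  static void putWithRetry(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // The region's memstore is over its blocking limit (512 KB in the log);
          // back off and let the in-flight flush drain it.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new java.io.IOException("region still too busy after retries");
    }
  }
}
```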
2024-11-20T22:26:19,804 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:19,804 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141579325; duration=0sec 2024-11-20T22:26:19,804 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:19,804 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:19,804 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:19,805 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:19,805 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:19,805 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:19,805 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/bbd6513ff2ec41a391c2e45783f7ab78, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/c11ab57cdf4545008d4a453dcfc89dbb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/8b3cf8c29fa147e292d0eca94cf452df] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=34.3 K 2024-11-20T22:26:19,806 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting bbd6513ff2ec41a391c2e45783f7ab78, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732141576469 2024-11-20T22:26:19,806 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c11ab57cdf4545008d4a453dcfc89dbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732141576613 2024-11-20T22:26:19,806 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b3cf8c29fa147e292d0eca94cf452df, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732141577253 2024-11-20T22:26:19,817 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
1d38cbecc23f382ec5e2809846caa111#C#compaction#567 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:19,817 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d616fa2bd11e45f19cd26f01005e1fb5 is 50, key is test_row_0/C:col10/1732141577253/Put/seqid=0 2024-11-20T22:26:19,855 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ddf66450ac5f4b76bbb62a1084edcf2c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ddf66450ac5f4b76bbb62a1084edcf2c 2024-11-20T22:26:19,861 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into ddf66450ac5f4b76bbb62a1084edcf2c(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:19,861 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:19,861 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141579325; duration=0sec 2024-11-20T22:26:19,861 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:19,861 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:19,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742505_1681 (size=13085) 2024-11-20T22:26:19,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141639883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141639883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141639885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,889 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d616fa2bd11e45f19cd26f01005e1fb5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d616fa2bd11e45f19cd26f01005e1fb5 2024-11-20T22:26:19,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141639885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:19,899 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into d616fa2bd11e45f19cd26f01005e1fb5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:19,899 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:19,899 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141579325; duration=0sec 2024-11-20T22:26:19,899 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:19,899 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:19,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:19,922 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120548a9d8ff77945069fa2c22982d44073_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120548a9d8ff77945069fa2c22982d44073_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:19,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/c51c80f28e3e4ec58314cc8010b9c9f2, store: [table=TestAcidGuarantees family=A 
region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:19,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/c51c80f28e3e4ec58314cc8010b9c9f2 is 175, key is test_row_0/A:col10/1732141578428/Put/seqid=0 2024-11-20T22:26:19,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742506_1682 (size=31255) 2024-11-20T22:26:19,961 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=341, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/c51c80f28e3e4ec58314cc8010b9c9f2 2024-11-20T22:26:19,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/046c033ab137491d82b5730b92a0386a is 50, key is test_row_0/B:col10/1732141578428/Put/seqid=0 2024-11-20T22:26:19,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742507_1683 (size=12301) 2024-11-20T22:26:19,976 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/046c033ab137491d82b5730b92a0386a 2024-11-20T22:26:19,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/daee59dae57f4333bf9aa595f87fe0c3 is 50, key is test_row_0/C:col10/1732141578428/Put/seqid=0 2024-11-20T22:26:19,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742508_1684 (size=12301) 2024-11-20T22:26:19,985 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/daee59dae57f4333bf9aa595f87fe0c3 2024-11-20T22:26:19,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/c51c80f28e3e4ec58314cc8010b9c9f2 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2 2024-11-20T22:26:19,991 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2, entries=150, sequenceid=341, filesize=30.5 K 2024-11-20T22:26:19,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/046c033ab137491d82b5730b92a0386a as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/046c033ab137491d82b5730b92a0386a 2024-11-20T22:26:19,993 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/046c033ab137491d82b5730b92a0386a, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T22:26:19,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/daee59dae57f4333bf9aa595f87fe0c3 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/daee59dae57f4333bf9aa595f87fe0c3 2024-11-20T22:26:19,998 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/daee59dae57f4333bf9aa595f87fe0c3, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T22:26:19,999 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 1d38cbecc23f382ec5e2809846caa111 in 584ms, sequenceid=341, compaction requested=false 2024-11-20T22:26:19,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:19,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:19,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-20T22:26:19,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-20T22:26:20,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T22:26:20,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5070 sec 2024-11-20T22:26:20,005 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.5130 sec 2024-11-20T22:26:20,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:20,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T22:26:20,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:20,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:20,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:20,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:20,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:20,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:20,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120338fb5fe3e1d4d99b45e9c218c269f51_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:20,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742509_1685 (size=14994) 2024-11-20T22:26:20,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141640203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141640205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141640205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141640206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141640306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141640308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141640308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141640311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141640509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141640511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141640512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141640516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T22:26:20,596 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T22:26:20,597 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:20,599 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:20,604 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120338fb5fe3e1d4d99b45e9c218c269f51_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120338fb5fe3e1d4d99b45e9c218c269f51_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:20,605 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e4e18dcc2ca84a63b7e4b9a699b5fd18, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:20,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-20T22:26:20,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e4e18dcc2ca84a63b7e4b9a699b5fd18 is 175, key is test_row_0/A:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:20,606 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-20T22:26:20,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:20,607 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:20,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:20,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742510_1686 (size=39949) 2024-11-20T22:26:20,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:20,758 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:20,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:20,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:20,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:20,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:20,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:20,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:20,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141640814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141640815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141640815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141640819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:20,911 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:20,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:20,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:20,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:20,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:20,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:20,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:20,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,013 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=359, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e4e18dcc2ca84a63b7e4b9a699b5fd18 2024-11-20T22:26:21,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/c85b356f5b904e85b84254fa989b9bce is 50, key is test_row_0/B:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742511_1687 (size=12301) 2024-11-20T22:26:21,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:21,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:21,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:21,219 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:21,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141641316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:21,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141641318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:21,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141641319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:21,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141641323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:21,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:21,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:21,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:21,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/c85b356f5b904e85b84254fa989b9bce 2024-11-20T22:26:21,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/1fae6f77210d463498d5b3f8681f231f is 50, key is test_row_0/C:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:21,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742512_1688 (size=12301) 2024-11-20T22:26:21,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/1fae6f77210d463498d5b3f8681f231f 2024-11-20T22:26:21,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/e4e18dcc2ca84a63b7e4b9a699b5fd18 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18 2024-11-20T22:26:21,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18, entries=200, sequenceid=359, filesize=39.0 K 2024-11-20T22:26:21,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/c85b356f5b904e85b84254fa989b9bce as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c85b356f5b904e85b84254fa989b9bce 2024-11-20T22:26:21,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c85b356f5b904e85b84254fa989b9bce, entries=150, sequenceid=359, filesize=12.0 K 2024-11-20T22:26:21,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/1fae6f77210d463498d5b3f8681f231f as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/1fae6f77210d463498d5b3f8681f231f 2024-11-20T22:26:21,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/1fae6f77210d463498d5b3f8681f231f, entries=150, sequenceid=359, filesize=12.0 K 2024-11-20T22:26:21,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1d38cbecc23f382ec5e2809846caa111 in 1281ms, sequenceid=359, compaction requested=true 2024-11-20T22:26:21,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:21,470 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:21,470 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:21,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103350 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:21,471 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:21,472 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:21,472 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ddf66450ac5f4b76bbb62a1084edcf2c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=100.9 K 2024-11-20T22:26:21,472 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,472 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ddf66450ac5f4b76bbb62a1084edcf2c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18] 2024-11-20T22:26:21,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:21,472 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddf66450ac5f4b76bbb62a1084edcf2c, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732141576619 2024-11-20T22:26:21,472 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:21,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:21,472 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e59352b827f64be0845276b28aa45500, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/046c033ab137491d82b5730b92a0386a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c85b356f5b904e85b84254fa989b9bce] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=36.8 K 2024-11-20T22:26:21,472 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c51c80f28e3e4ec58314cc8010b9c9f2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141578421 2024-11-20T22:26:21,473 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4e18dcc2ca84a63b7e4b9a699b5fd18, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732141579571 2024-11-20T22:26:21,473 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e59352b827f64be0845276b28aa45500, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732141576619 2024-11-20T22:26:21,473 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 046c033ab137491d82b5730b92a0386a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141578421 2024-11-20T22:26:21,474 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting c85b356f5b904e85b84254fa989b9bce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732141579571 2024-11-20T22:26:21,481 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:21,483 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ae86b08439f04873bcbf72e9e0d98513_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:21,483 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#574 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:21,483 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/656fff6e7e904bd98c4e2ff0f2cf5529 is 50, key is test_row_0/B:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:21,485 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ae86b08439f04873bcbf72e9e0d98513_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:21,485 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ae86b08439f04873bcbf72e9e0d98513_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:21,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742513_1689 (size=13187) 2024-11-20T22:26:21,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742514_1690 (size=4469) 2024-11-20T22:26:21,497 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#573 average throughput is 1.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:21,498 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/2f49b64b5079488bb32412df6a79b6af is 175, key is test_row_0/A:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:21,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742515_1691 (size=32141) 2024-11-20T22:26:21,508 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/2f49b64b5079488bb32412df6a79b6af as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2f49b64b5079488bb32412df6a79b6af 2024-11-20T22:26:21,513 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into 2f49b64b5079488bb32412df6a79b6af(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T22:26:21,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:21,514 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141581469; duration=0sec 2024-11-20T22:26:21,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:21,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:21,514 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:21,515 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:21,515 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:21,515 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,515 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d616fa2bd11e45f19cd26f01005e1fb5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/daee59dae57f4333bf9aa595f87fe0c3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/1fae6f77210d463498d5b3f8681f231f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=36.8 K 2024-11-20T22:26:21,515 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d616fa2bd11e45f19cd26f01005e1fb5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732141576619 2024-11-20T22:26:21,515 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting daee59dae57f4333bf9aa595f87fe0c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732141578421 2024-11-20T22:26:21,515 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fae6f77210d463498d5b3f8681f231f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732141579571 2024-11-20T22:26:21,528 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#575 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:21,529 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/6a61896684b24c6ebc6a03fda5073f0d is 50, key is test_row_0/C:col10/1732141580188/Put/seqid=0 2024-11-20T22:26:21,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:21,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:21,531 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:21,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:21,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742516_1692 (size=13187) 2024-11-20T22:26:21,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120966e32f0e10b41f2b6f0fdea7b98ac67_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141580204/Put/seqid=0 2024-11-20T22:26:21,541 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/6a61896684b24c6ebc6a03fda5073f0d as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6a61896684b24c6ebc6a03fda5073f0d 2024-11-20T22:26:21,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742517_1693 (size=12454) 2024-11-20T22:26:21,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:21,545 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 6a61896684b24c6ebc6a03fda5073f0d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:21,545 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:21,545 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141581470; duration=0sec 2024-11-20T22:26:21,545 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:21,545 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:21,545 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120966e32f0e10b41f2b6f0fdea7b98ac67_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120966e32f0e10b41f2b6f0fdea7b98ac67_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b074b1b5422744868685288b2c9aee5f, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b074b1b5422744868685288b2c9aee5f is 
175, key is test_row_0/A:col10/1732141580204/Put/seqid=0 2024-11-20T22:26:21,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742518_1694 (size=31255) 2024-11-20T22:26:21,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:21,896 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/656fff6e7e904bd98c4e2ff0f2cf5529 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/656fff6e7e904bd98c4e2ff0f2cf5529 2024-11-20T22:26:21,899 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into 656fff6e7e904bd98c4e2ff0f2cf5529(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:21,899 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:21,899 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141581470; duration=0sec 2024-11-20T22:26:21,899 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:21,899 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:21,950 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=381, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b074b1b5422744868685288b2c9aee5f 2024-11-20T22:26:21,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/4c55699d5e5d4beca680b194253b4762 is 50, key is test_row_0/B:col10/1732141580204/Put/seqid=0 2024-11-20T22:26:21,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742519_1695 (size=12301) 2024-11-20T22:26:22,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
as already flushing 2024-11-20T22:26:22,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:22,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141642337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141642338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141642338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141642339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,360 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/4c55699d5e5d4beca680b194253b4762 2024-11-20T22:26:22,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/67fa77d214494f09bbdb059976ca2210 is 50, key is test_row_0/C:col10/1732141580204/Put/seqid=0 2024-11-20T22:26:22,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742520_1696 (size=12301) 2024-11-20T22:26:22,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141642439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141642441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141642442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141642445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141642641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141642645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141642645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141642647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:22,774 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/67fa77d214494f09bbdb059976ca2210 2024-11-20T22:26:22,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b074b1b5422744868685288b2c9aee5f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f 2024-11-20T22:26:22,781 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f, entries=150, sequenceid=381, filesize=30.5 K 2024-11-20T22:26:22,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/4c55699d5e5d4beca680b194253b4762 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c55699d5e5d4beca680b194253b4762 2024-11-20T22:26:22,786 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c55699d5e5d4beca680b194253b4762, entries=150, sequenceid=381, filesize=12.0 K 2024-11-20T22:26:22,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/67fa77d214494f09bbdb059976ca2210 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/67fa77d214494f09bbdb059976ca2210 2024-11-20T22:26:22,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/67fa77d214494f09bbdb059976ca2210, entries=150, sequenceid=381, filesize=12.0 K 2024-11-20T22:26:22,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 1d38cbecc23f382ec5e2809846caa111 in 1263ms, sequenceid=381, compaction requested=false 2024-11-20T22:26:22,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:22,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
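The burst of RegionTooBusyException rejections recorded above is the region server refusing Mutate calls while the memstore of region 1d38cbecc23f382ec5e2809846caa111 sits over its 512.0 K blocking limit and the flush (pid=172) is still draining it. The following is a minimal client-side sketch, not part of the test harness: it shows how a writer could back off and retry such rejections. The table, row, family, and qualifier names are copied from the log; the class name and backoff values are illustrative assumptions, and with default client settings the HBase client performs this retrying internally (surfacing the cause wrapped in a retries-exhausted exception) rather than handing RegionTooBusyException straight to application code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: retry a put with exponential backoff while the
// target region rejects writes because its memstore is over the blocking limit.
public class BusyRegionWriter {

    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        long sleepMs = 100; // assumed initial backoff, not taken from the log
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put); // the server side may reject with RegionTooBusyException
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e; // give up; the region stayed blocked too long
                }
                Thread.sleep(sleepMs); // give the in-flight flush time to drain the memstore
                sleepMs = Math.min(sleepMs * 2, 5_000);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/family/qualifier mirror the cells seen in the flush output above.
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 10);
        }
    }
}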
2024-11-20T22:26:22,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-20T22:26:22,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-20T22:26:22,796 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T22:26:22,796 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1880 sec 2024-11-20T22:26:22,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.1990 sec 2024-11-20T22:26:22,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:22,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T22:26:22,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:22,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:22,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:22,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:22,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:22,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:22,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cb54e2971d994205bb0337a7d113754e_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:22,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742521_1697 (size=14994) 2024-11-20T22:26:22,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141642964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141642963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141642966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:22,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:22,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141642967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141643067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141643068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141643069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141643069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141643271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141643271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141643272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141643272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,356 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:23,359 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cb54e2971d994205bb0337a7d113754e_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cb54e2971d994205bb0337a7d113754e_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:23,360 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/55b9a12e6af249c9839cc4731de3a2e6, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:23,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/55b9a12e6af249c9839cc4731de3a2e6 is 175, key is test_row_0/A:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:23,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742522_1698 (size=39949) 2024-11-20T22:26:23,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141643575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141643577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141643577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141643577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:23,774 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=399, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/55b9a12e6af249c9839cc4731de3a2e6 2024-11-20T22:26:23,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e421e527477d47f4b07104604b6395b5 is 50, key is test_row_0/B:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:23,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742523_1699 (size=12301) 2024-11-20T22:26:23,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e421e527477d47f4b07104604b6395b5 2024-11-20T22:26:23,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/6489735779764ef5af341ffb83ca0f65 is 50, key is test_row_0/C:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:23,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742524_1700 (size=12301) 2024-11-20T22:26:23,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/6489735779764ef5af341ffb83ca0f65 2024-11-20T22:26:23,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/55b9a12e6af249c9839cc4731de3a2e6 as 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6 2024-11-20T22:26:23,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6, entries=200, sequenceid=399, filesize=39.0 K 2024-11-20T22:26:23,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/e421e527477d47f4b07104604b6395b5 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e421e527477d47f4b07104604b6395b5 2024-11-20T22:26:23,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e421e527477d47f4b07104604b6395b5, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T22:26:23,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/6489735779764ef5af341ffb83ca0f65 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6489735779764ef5af341ffb83ca0f65 2024-11-20T22:26:23,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6489735779764ef5af341ffb83ca0f65, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T22:26:23,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 1d38cbecc23f382ec5e2809846caa111 in 908ms, sequenceid=399, compaction requested=true 2024-11-20T22:26:23,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:23,854 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:23,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:23,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:23,855 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:23,855 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103345 
starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:23,856 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:23,856 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:23,856 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2f49b64b5079488bb32412df6a79b6af, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=100.9 K 2024-11-20T22:26:23,856 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:23,856 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2f49b64b5079488bb32412df6a79b6af, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6] 2024-11-20T22:26:23,856 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f49b64b5079488bb32412df6a79b6af, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732141579571 2024-11-20T22:26:23,856 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:23,856 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:23,857 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
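
Editor's note: the two "Exploring compaction algorithm has selected 3 files ..." entries above show the short- and long-compaction threads picking all three eligible store files for A and B because the candidate set is "in ratio". The sketch below is only a rough illustration of that ratio test, not the actual ExploringCompactionPolicy code (which also enforces min/max file counts, total size limits and off-peak ratios); the 1.2 ratio and the byte sizes are assumptions loosely matching the ~31.4 K, ~30.5 K and ~39.0 K files logged above.

import java.util.List;

// Simplified "files in ratio" check: every candidate file must be no larger
// than the sum of the other candidates multiplied by the compaction ratio.
public final class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // If one file dwarfs the rest, the set is rejected and a smaller
      // permutation is considered instead.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly like the three A-family files selected above.
    System.out.println(filesInRatio(List.of(32_140L, 31_232L, 39_936L), 1.2)); // true -> eligible
  }
}

Because no single file dominates the sum of the others, the whole set qualifies on the first try, which is presumably what "considering 1 permutations with 1 in ratio" reports.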
2024-11-20T22:26:23,857 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/656fff6e7e904bd98c4e2ff0f2cf5529, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c55699d5e5d4beca680b194253b4762, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e421e527477d47f4b07104604b6395b5] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=36.9 K 2024-11-20T22:26:23,857 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b074b1b5422744868685288b2c9aee5f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732141580204 2024-11-20T22:26:23,857 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 656fff6e7e904bd98c4e2ff0f2cf5529, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732141579571 2024-11-20T22:26:23,857 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55b9a12e6af249c9839cc4731de3a2e6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141582336 2024-11-20T22:26:23,858 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c55699d5e5d4beca680b194253b4762, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732141580204 2024-11-20T22:26:23,859 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e421e527477d47f4b07104604b6395b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141582336 2024-11-20T22:26:23,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:23,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:23,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:23,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:23,867 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#582 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:23,867 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/0ff1fd796af14180a9d1ebf05b4d69b8 is 50, key is test_row_0/B:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:23,871 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:23,895 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120bd8c8e40a17f40eaa81a4a7afb213546_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:23,897 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120bd8c8e40a17f40eaa81a4a7afb213546_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:23,898 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bd8c8e40a17f40eaa81a4a7afb213546_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:23,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742525_1701 (size=13289) 2024-11-20T22:26:23,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742526_1702 (size=4469) 2024-11-20T22:26:24,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:24,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T22:26:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:24,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e6b9fdf9122d4f35995358bfe057fef9_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141582966/Put/seqid=0 2024-11-20T22:26:24,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742527_1703 (size=14994) 2024-11-20T22:26:24,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141644102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141644149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141644149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141644149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141644250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141644255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141644256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141644258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,375 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#A#compaction#583 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:24,376 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/9672830dbaf34687b58b28833e30ae9e is 175, key is test_row_0/A:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:24,379 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/0ff1fd796af14180a9d1ebf05b4d69b8 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/0ff1fd796af14180a9d1ebf05b4d69b8 2024-11-20T22:26:24,406 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into 0ff1fd796af14180a9d1ebf05b4d69b8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:24,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:24,406 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141583855; duration=0sec 2024-11-20T22:26:24,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:24,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:24,406 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:24,419 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:24,419 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:24,419 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
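
Editor's note: the RegionTooBusyException warnings repeated throughout this stretch come from checkResources() rejecting puts while the region's memstore is above its blocking limit; callers are expected to back off and retry once the in-flight flush drains it. Below is a minimal client-side sketch of that pattern, assuming the TestAcidGuarantees table and an A:col10 column like the rows being written above. The stock HBase client already retries these internally under hbase.client.retries.number and hbase.client.pause, so an explicit loop like this is only needed if those settings are deliberately lowered or custom backoff is wanted.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 3); // fail fast, handle busy regions ourselves
    conf.setLong("hbase.client.pause", 100);       // ms between the client's own retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;                     // write accepted
        } catch (IOException e) {    // RegionTooBusyException surfaces here, possibly wrapped,
                                     // once the client's internal retries are exhausted
          if (attempt >= 5) {
            throw e;                 // give up after a few attempts
          }
          Thread.sleep(backoffMs);   // wait for the flush to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}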
2024-11-20T22:26:24,419 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6a61896684b24c6ebc6a03fda5073f0d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/67fa77d214494f09bbdb059976ca2210, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6489735779764ef5af341ffb83ca0f65] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=36.9 K 2024-11-20T22:26:24,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742528_1704 (size=32243) 2024-11-20T22:26:24,420 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a61896684b24c6ebc6a03fda5073f0d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732141579571 2024-11-20T22:26:24,430 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 67fa77d214494f09bbdb059976ca2210, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732141580204 2024-11-20T22:26:24,435 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6489735779764ef5af341ffb83ca0f65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141582336 2024-11-20T22:26:24,462 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/9672830dbaf34687b58b28833e30ae9e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/9672830dbaf34687b58b28833e30ae9e 2024-11-20T22:26:24,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141644457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,466 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#585 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:24,466 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/5a6dc9f69ee4442185fcd46ee8cfc180 is 50, key is test_row_0/C:col10/1732141582945/Put/seqid=0 2024-11-20T22:26:24,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141644462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,468 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into 9672830dbaf34687b58b28833e30ae9e(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:24,468 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:24,468 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141583854; duration=0sec 2024-11-20T22:26:24,469 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:24,469 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:24,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141644463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141644463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742529_1705 (size=13289) 2024-11-20T22:26:24,508 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:24,513 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e6b9fdf9122d4f35995358bfe057fef9_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e6b9fdf9122d4f35995358bfe057fef9_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:24,514 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/101f853f897d4d45a27e3fc811d223be, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:24,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/101f853f897d4d45a27e3fc811d223be is 175, key is test_row_0/A:col10/1732141582966/Put/seqid=0 2024-11-20T22:26:24,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742530_1706 (size=39949) 2024-11-20T22:26:24,528 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=420, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/101f853f897d4d45a27e3fc811d223be 2024-11-20T22:26:24,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/6b0ccdc73c7c40fc82d29675fb77c903 is 50, key is 
test_row_0/B:col10/1732141582966/Put/seqid=0 2024-11-20T22:26:24,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742531_1707 (size=12301) 2024-11-20T22:26:24,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/6b0ccdc73c7c40fc82d29675fb77c903 2024-11-20T22:26:24,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/eb215ab41b6e4f68969c2ce6cdbcf058 is 50, key is test_row_0/C:col10/1732141582966/Put/seqid=0 2024-11-20T22:26:24,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742532_1708 (size=12301) 2024-11-20T22:26:24,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/eb215ab41b6e4f68969c2ce6cdbcf058 2024-11-20T22:26:24,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/101f853f897d4d45a27e3fc811d223be as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be 2024-11-20T22:26:24,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be, entries=200, sequenceid=420, filesize=39.0 K 2024-11-20T22:26:24,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/6b0ccdc73c7c40fc82d29675fb77c903 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/6b0ccdc73c7c40fc82d29675fb77c903 2024-11-20T22:26:24,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/6b0ccdc73c7c40fc82d29675fb77c903, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:26:24,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/eb215ab41b6e4f68969c2ce6cdbcf058 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/eb215ab41b6e4f68969c2ce6cdbcf058 
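
Editor's note: the flush sequence just above writes family A through the MOB path (DefaultMobStoreFlusher, mobdir/.tmp) while B and C go through the default flusher, and the whole churn is driven by a very small blocking limit; the 512.0 K figure in the rejections is the per-region memstore flush size times hbase.hregion.memstore.block.multiplier. This excerpt does not show how the test actually configures that, so the following is only a hypothetical setup that would produce similar behaviour: a 128 KB flush size with the default multiplier of 4, and a MOB-enabled 'A' family. All thresholds here are illustrative, not the values TestAcidGuarantees uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class SmallMemstoreMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Writes block once the memstore reaches flushSize * multiplier
    // (128 KB * 4 = 512 KB in this hypothetical, matching the size of the
    // "Over memstore limit=512.0 K" rejections above).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setMemStoreFlushSize(128 * 1024)            // tiny flush size to force frequent flushes
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))          // MOB-enabled family, hence the mobdir/.tmp writers above
              .setMobEnabled(true)
              .setMobThreshold(4 * 1024)               // illustrative threshold only
              .build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build());
    }
  }
}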
2024-11-20T22:26:24,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/eb215ab41b6e4f68969c2ce6cdbcf058, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T22:26:24,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=93.93 KB/96180 for 1d38cbecc23f382ec5e2809846caa111 in 549ms, sequenceid=420, compaction requested=false 2024-11-20T22:26:24,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:24,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T22:26:24,712 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-20T22:26:24,714 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T22:26:24,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-20T22:26:24,716 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T22:26:24,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:24,716 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T22:26:24,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T22:26:24,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:24,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T22:26:24,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:24,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:24,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:24,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:24,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:24,766 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:24,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aeedaa6c1ef14b9cbb660e83fe953860_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:24,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742533_1709 (size=12454) 2024-11-20T22:26:24,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141644795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141644800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141644800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141644800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:24,868 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:24,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:24,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:24,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:24,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:24,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:24,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:24,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141644905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141644905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,908 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/5a6dc9f69ee4442185fcd46ee8cfc180 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/5a6dc9f69ee4442185fcd46ee8cfc180 2024-11-20T22:26:24,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141644905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141644905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,911 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 5a6dc9f69ee4442185fcd46ee8cfc180(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:24,911 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:24,911 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141583862; duration=0sec 2024-11-20T22:26:24,911 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:24,911 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:24,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:24,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43014 deadline: 1732141644990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:24,994 DEBUG [Thread-2577 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18244 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., hostname=6365a1e51efd,46811,1732141422048, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T22:26:25,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:25,022 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
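The repeated RegionTooBusyException entries above all come from the same server-side guard: HRegion.checkResources rejects a put once the region's memstore exceeds its blocking limit, which HBase computes as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The exact settings used by this test run are not visible in this excerpt; the snippet below is a minimal sketch, assuming a 128 KB flush size and the default multiplier of 4, which is one combination that would yield the 512.0 K limit reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-sized values for illustration; production defaults are 128 MB and 4.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134_217_728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // Writes are rejected with RegionTooBusyException while the region's memstore
    // size stays above flushSize * multiplier (524288 bytes = 512 K with the values above).
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}

A small limit like this forces the back-pressure path that the ACID-guarantees test appears to be exercising: writers are throttled until the in-flight flush drains the memstore.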
2024-11-20T22:26:25,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141645110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141645110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141645111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141645115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,175 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
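A few records above, RpcRetryingCallerImpl shows the client side absorbing these RegionTooBusyException responses and retrying (tries=8 of 16, roughly 18 s into the operation). The sketch below is a minimal, hypothetical writer that widens that retry budget so the server has time to finish flushing before the client gives up; the table, row, family, and qualifier names are copied from the log, everything else (class name, values) is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTunedWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // More retries and a longer base pause for the client's exponential backoff,
    // plus a larger overall per-operation budget.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 200);                  // ms, base backoff
    conf.setLong("hbase.client.operation.timeout", 120_000);  // ms, whole-operation budget

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is retried internally by the HBase client until
      // the retry/timeout budget above is exhausted.
      table.put(put);
    }
  }
}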
2024-11-20T22:26:25,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,202 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:25,206 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aeedaa6c1ef14b9cbb660e83fe953860_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aeedaa6c1ef14b9cbb660e83fe953860_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:25,212 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ba48dccf97d5405e95f4bdbcb66b55a6, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:25,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ba48dccf97d5405e95f4bdbcb66b55a6 is 175, key is test_row_0/A:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:25,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742534_1710 (size=31255) 2024-11-20T22:26:25,271 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=440, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ba48dccf97d5405e95f4bdbcb66b55a6 2024-11-20T22:26:25,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/a5ef7072c6714baa90d0929b0284a207 is 50, key is test_row_0/B:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:25,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:25,332 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742535_1711 (size=12301) 2024-11-20T22:26:25,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
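The HMobStore and DefaultMobStoreFlusher records a few entries above (the rename from mobdir/.tmp into mobdir/data during the flush of store A) indicate that family A is MOB-enabled in this run. Below is a minimal sketch of how such a family can be declared with the public client API; the 100-byte threshold and the exact schema are assumptions for illustration, not the test's actual table definition.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSetup {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family 'A' stores cells larger than the threshold as MOB files under /mobdir,
      // which is why its flushes go through HMobStore/DefaultMobStoreFlusher.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)  // bytes; hypothetical threshold for illustration
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(mobFamily)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("B")))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("C")))
          .build());
    }
  }
}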
2024-11-20T22:26:25,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/a5ef7072c6714baa90d0929b0284a207 2024-11-20T22:26:25,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
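Illustrative note: the HMobStore "FLUSH Renaming" and DefaultMobStoreFlusher entries in this flush only appear for a MOB-enabled column family (here family A). A minimal sketch of declaring such a family with the standard HBase 2.x client API is given below; the 100 KB threshold and class name are illustrative and not taken from this test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Cells in family A larger than the MOB threshold are written to separate
      // MOB files under mobdir/, which is what the flusher is renaming above.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100 * 1024L) // illustrative threshold, not the test's value
              .build())
          .build());
    }
  }
}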
2024-11-20T22:26:25,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/e5a8dc9743004e09b4bf873685520e03 is 50, key is test_row_0/C:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:25,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742536_1712 (size=12301) 2024-11-20T22:26:25,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/e5a8dc9743004e09b4bf873685520e03 2024-11-20T22:26:25,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/ba48dccf97d5405e95f4bdbcb66b55a6 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6 2024-11-20T22:26:25,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6, entries=150, sequenceid=440, filesize=30.5 K 2024-11-20T22:26:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/a5ef7072c6714baa90d0929b0284a207 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5ef7072c6714baa90d0929b0284a207 2024-11-20T22:26:25,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5ef7072c6714baa90d0929b0284a207, entries=150, sequenceid=440, filesize=12.0 K 2024-11-20T22:26:25,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/e5a8dc9743004e09b4bf873685520e03 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/e5a8dc9743004e09b4bf873685520e03 2024-11-20T22:26:25,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/e5a8dc9743004e09b4bf873685520e03, entries=150, sequenceid=440, filesize=12.0 K 2024-11-20T22:26:25,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=114.05 KB/116790 for 1d38cbecc23f382ec5e2809846caa111 in 638ms, sequenceid=440, 
compaction requested=true 2024-11-20T22:26:25,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:25,403 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:25,405 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:25,405 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/A is initiating minor compaction (all files) 2024-11-20T22:26:25,405 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/A in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,405 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/9672830dbaf34687b58b28833e30ae9e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=101.0 K 2024-11-20T22:26:25,405 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,405 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
files: [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/9672830dbaf34687b58b28833e30ae9e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6] 2024-11-20T22:26:25,405 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9672830dbaf34687b58b28833e30ae9e, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141582336 2024-11-20T22:26:25,406 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 101f853f897d4d45a27e3fc811d223be, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141582962 2024-11-20T22:26:25,406 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba48dccf97d5405e95f4bdbcb66b55a6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732141584099 2024-11-20T22:26:25,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T22:26:25,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:25,412 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:25,413 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:25,413 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/B is initiating minor compaction (all files) 2024-11-20T22:26:25,413 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/B in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
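Illustrative note: the "Exploring compaction algorithm has selected 3 files of size 103447 ... with 1 in ratio" entries above come from HBase's size-ratio compaction selection. Below is a simplified, stand-alone sketch of the "in ratio" test only; it is not the actual ExploringCompactionPolicy code (which also explores permutations and enforces min/max file counts), the 1.2 ratio is the documented default rather than a value read from this run, and the byte sizes are reconstructed from the 31.5 K / 39.0 K / 30.5 K figures logged for the A-family files:

import java.util.List;

public class CompactionRatioCheck {
  // A candidate set is "in ratio" when no single file is larger than
  // ratio * (sum of the other files), so one big file is not rewritten
  // together with a handful of small ones.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<Long> aFamilyFiles = List.of(32_256L, 39_936L, 31_255L); // ~31.5 K, 39.0 K, 30.5 K
    System.out.println(filesInRatio(aFamilyFiles, 1.2)); // true: all three files compact together
  }
}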
2024-11-20T22:26:25,413 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/0ff1fd796af14180a9d1ebf05b4d69b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/6b0ccdc73c7c40fc82d29675fb77c903, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5ef7072c6714baa90d0929b0284a207] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=37.0 K 2024-11-20T22:26:25,414 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ff1fd796af14180a9d1ebf05b4d69b8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141582336 2024-11-20T22:26:25,414 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b0ccdc73c7c40fc82d29675fb77c903, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141582962 2024-11-20T22:26:25,415 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting a5ef7072c6714baa90d0929b0284a207, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732141584099 2024-11-20T22:26:25,421 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:25,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T22:26:25,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:25,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1d38cbecc23f382ec5e2809846caa111:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T22:26:25,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:25,435 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#B#compaction#592 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:25,435 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/b5992033dbec40b59ec0c3d9a57e0607 is 50, key is test_row_0/B:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:25,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T22:26:25,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:25,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:25,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:25,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:25,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:25,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:25,439 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120f35798cd29ac4b3e88948dec91cfde6c_1d38cbecc23f382ec5e2809846caa111 store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:25,441 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f35798cd29ac4b3e88948dec91cfde6c_1d38cbecc23f382ec5e2809846caa111, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:25,441 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f35798cd29ac4b3e88948dec91cfde6c_1d38cbecc23f382ec5e2809846caa111 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:25,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141645453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141645456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141645457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141645457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742537_1713 (size=13391) 2024-11-20T22:26:25,490 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/b5992033dbec40b59ec0c3d9a57e0607 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/b5992033dbec40b59ec0c3d9a57e0607 2024-11-20T22:26:25,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120db68cf0fb614443b808df371b759c48c_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141585435/Put/seqid=0 2024-11-20T22:26:25,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742538_1714 (size=4469) 2024-11-20T22:26:25,497 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/B of 1d38cbecc23f382ec5e2809846caa111 into b5992033dbec40b59ec0c3d9a57e0607(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
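Illustrative note: the RegionTooBusyException warnings in this stretch are HBase's write backpressure. Once the region's memstore passes its blocking size (hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K limit suggests this test configures a very small flush size), puts are rejected until the flush catches up. The stock client normally retries this internally, and the exception may surface wrapped in a retries-exhausted error, so the explicit loop below is only a sketch of the backoff idea; the row, family and qualifier match the test rows seen in this log, while the value, class name and backoff constants are made up:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
      long backoffMs = 100; // illustrative starting pause
      while (true) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // The memstore is over its blocking limit; give the flush time to
          // drain, then retry with a growing pause.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 10_000L);
        }
      }
    }
  }
}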
2024-11-20T22:26:25,497 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:25,497 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/B, priority=13, startTime=1732141585412; duration=0sec 2024-11-20T22:26:25,497 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T22:26:25,497 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:B 2024-11-20T22:26:25,497 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T22:26:25,499 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T22:26:25,499 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1540): 1d38cbecc23f382ec5e2809846caa111/C is initiating minor compaction (all files) 2024-11-20T22:26:25,499 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1d38cbecc23f382ec5e2809846caa111/C in TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,499 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/5a6dc9f69ee4442185fcd46ee8cfc180, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/eb215ab41b6e4f68969c2ce6cdbcf058, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/e5a8dc9743004e09b4bf873685520e03] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp, totalSize=37.0 K 2024-11-20T22:26:25,500 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a6dc9f69ee4442185fcd46ee8cfc180, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732141582336 2024-11-20T22:26:25,500 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting eb215ab41b6e4f68969c2ce6cdbcf058, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732141582962 2024-11-20T22:26:25,501 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] compactions.Compactor(224): Compacting e5a8dc9743004e09b4bf873685520e03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732141584099 2024-11-20T22:26:25,506 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
1d38cbecc23f382ec5e2809846caa111#A#compaction#591 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:25,507 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,507 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/939148004f9944a885faa2657ee11ab4 is 175, key is test_row_0/A:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:25,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,549 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1d38cbecc23f382ec5e2809846caa111#C#compaction#594 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T22:26:25,549 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/264c4e61cd0741c792ab35dab9c98b4f is 50, key is test_row_0/C:col10/1732141584099/Put/seqid=0 2024-11-20T22:26:25,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141645560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141645564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141645565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141645566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742539_1715 (size=12454) 2024-11-20T22:26:25,584 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:25,587 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120db68cf0fb614443b808df371b759c48c_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120db68cf0fb614443b808df371b759c48c_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:25,588 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b64b748be6834b98b2b65367179be076, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:25,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b64b748be6834b98b2b65367179be076 is 175, key is test_row_0/A:col10/1732141585435/Put/seqid=0 2024-11-20T22:26:25,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742540_1716 (size=32345) 2024-11-20T22:26:25,597 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/939148004f9944a885faa2657ee11ab4 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/939148004f9944a885faa2657ee11ab4 2024-11-20T22:26:25,601 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/A of 1d38cbecc23f382ec5e2809846caa111 into 939148004f9944a885faa2657ee11ab4(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:25,601 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:25,601 INFO [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/A, priority=13, startTime=1732141585403; duration=0sec 2024-11-20T22:26:25,601 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:25,601 DEBUG [RS:0;6365a1e51efd:46811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:A 2024-11-20T22:26:25,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742541_1717 (size=13391) 2024-11-20T22:26:25,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742542_1718 (size=31255) 2024-11-20T22:26:25,677 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=466, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b64b748be6834b98b2b65367179be076 2024-11-20T22:26:25,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/4c1b218e340a4646acf8710236df08e1 is 50, key is test_row_0/B:col10/1732141585435/Put/seqid=0 2024-11-20T22:26:25,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742543_1719 (size=12301) 2024-11-20T22:26:25,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141645763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141645768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141645768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:25,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141645770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:25,827 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T22:26:25,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:25,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:25,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:25,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:25,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,072 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/264c4e61cd0741c792ab35dab9c98b4f as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/264c4e61cd0741c792ab35dab9c98b4f 2024-11-20T22:26:26,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141646068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,084 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1d38cbecc23f382ec5e2809846caa111/C of 1d38cbecc23f382ec5e2809846caa111 into 264c4e61cd0741c792ab35dab9c98b4f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T22:26:26,084 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:26,084 INFO [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111., storeName=1d38cbecc23f382ec5e2809846caa111/C, priority=13, startTime=1732141585434; duration=0sec 2024-11-20T22:26:26,084 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T22:26:26,084 DEBUG [RS:0;6365a1e51efd:46811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1d38cbecc23f382ec5e2809846caa111:C 2024-11-20T22:26:26,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141646078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141646078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141646084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/4c1b218e340a4646acf8710236df08e1 2024-11-20T22:26:26,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:26,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/b0bb14353db246d5b5eb8b2bd897788e is 50, key is test_row_0/C:col10/1732141585435/Put/seqid=0 2024-11-20T22:26:26,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742544_1720 (size=12301) 2024-11-20T22:26:26,314 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:26,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:26,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:26,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:26,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:26,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:26,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:26,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:26,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43002 deadline: 1732141646579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42996 deadline: 1732141646589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T22:26:26,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42978 deadline: 1732141646591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T22:26:26,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46811 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42986 deadline: 1732141646599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048
2024-11-20T22:26:26,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/b0bb14353db246d5b5eb8b2bd897788e
2024-11-20T22:26:26,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b64b748be6834b98b2b65367179be076 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b64b748be6834b98b2b65367179be076
2024-11-20T22:26:26,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048
2024-11-20T22:26:26,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b64b748be6834b98b2b65367179be076, entries=150, sequenceid=466, filesize=30.5 K
2024-11-20T22:26:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/4c1b218e340a4646acf8710236df08e1 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c1b218e340a4646acf8710236df08e1
2024-11-20T22:26:26,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174
2024-11-20T22:26:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.
2024-11-20T22:26:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing
2024-11-20T22:26:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.
2024-11-20T22:26:26,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174
java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:26:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174
java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T22:26:26,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4114): Remote procedure failed, pid=174
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T22:26:26,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c1b218e340a4646acf8710236df08e1, entries=150, sequenceid=466, filesize=12.0 K 2024-11-20T22:26:26,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/b0bb14353db246d5b5eb8b2bd897788e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/b0bb14353db246d5b5eb8b2bd897788e 2024-11-20T22:26:26,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/b0bb14353db246d5b5eb8b2bd897788e, entries=150, sequenceid=466, filesize=12.0 K 2024-11-20T22:26:26,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1d38cbecc23f382ec5e2809846caa111 in 1224ms, sequenceid=466, compaction requested=false 2024-11-20T22:26:26,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:26,712 DEBUG [Thread-2586 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x57aa792e to 127.0.0.1:51916 2024-11-20T22:26:26,712 DEBUG [Thread-2584 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ff94e66 to 127.0.0.1:51916 
2024-11-20T22:26:26,712 DEBUG [Thread-2586 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:26,712 DEBUG [Thread-2584 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:26,713 DEBUG [Thread-2590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ee91d52 to 127.0.0.1:51916 2024-11-20T22:26:26,713 DEBUG [Thread-2590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:26,713 DEBUG [Thread-2588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x362507f8 to 127.0.0.1:51916 2024-11-20T22:26:26,713 DEBUG [Thread-2588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:26,713 DEBUG [Thread-2582 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x520ba63a to 127.0.0.1:51916 2024-11-20T22:26:26,714 DEBUG [Thread-2582 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:26,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:26,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T22:26:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:26,783 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T22:26:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:26,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:26,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209aa9f972b97e420bb17407c1adc1e47d_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_0/A:col10/1732141585455/Put/seqid=0 2024-11-20T22:26:26,809 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742545_1721 (size=12454) 2024-11-20T22:26:26,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:26,813 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209aa9f972b97e420bb17407c1adc1e47d_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209aa9f972b97e420bb17407c1adc1e47d_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:26,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47ce5123b3e8429081e47711e9dd07fc, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:26,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47ce5123b3e8429081e47711e9dd07fc is 175, key is test_row_0/A:col10/1732141585455/Put/seqid=0 2024-11-20T22:26:26,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742546_1722 (size=31255) 2024-11-20T22:26:26,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T22:26:27,221 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=480, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47ce5123b3e8429081e47711e9dd07fc 2024-11-20T22:26:27,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/2460c22ce31346c4af06afbd978288e0 is 50, key is test_row_0/B:col10/1732141585455/Put/seqid=0 2024-11-20T22:26:27,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742547_1723 (size=12301) 2024-11-20T22:26:27,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46811 {}] regionserver.HRegion(8581): Flush requested on 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:27,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT 
flushing TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. as already flushing 2024-11-20T22:26:27,590 DEBUG [Thread-2573 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73e77a91 to 127.0.0.1:51916 2024-11-20T22:26:27,590 DEBUG [Thread-2573 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:27,604 DEBUG [Thread-2575 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x470e9e22 to 127.0.0.1:51916 2024-11-20T22:26:27,604 DEBUG [Thread-2575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:27,606 DEBUG [Thread-2579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55c3d878 to 127.0.0.1:51916 2024-11-20T22:26:27,607 DEBUG [Thread-2579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:27,612 DEBUG [Thread-2571 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62fe681e to 127.0.0.1:51916 2024-11-20T22:26:27,612 DEBUG [Thread-2571 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:27,665 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/2460c22ce31346c4af06afbd978288e0 2024-11-20T22:26:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/f97dfb3488a541378fc27964836a5d0e is 50, key is test_row_0/C:col10/1732141585455/Put/seqid=0 2024-11-20T22:26:27,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742548_1724 (size=12301) 2024-11-20T22:26:28,086 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/f97dfb3488a541378fc27964836a5d0e 2024-11-20T22:26:28,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/47ce5123b3e8429081e47711e9dd07fc as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47ce5123b3e8429081e47711e9dd07fc 2024-11-20T22:26:28,092 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47ce5123b3e8429081e47711e9dd07fc, entries=150, sequenceid=480, filesize=30.5 K 2024-11-20T22:26:28,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/2460c22ce31346c4af06afbd978288e0 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/2460c22ce31346c4af06afbd978288e0 2024-11-20T22:26:28,096 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/2460c22ce31346c4af06afbd978288e0, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T22:26:28,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/f97dfb3488a541378fc27964836a5d0e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f97dfb3488a541378fc27964836a5d0e 2024-11-20T22:26:28,100 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f97dfb3488a541378fc27964836a5d0e, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T22:26:28,100 INFO [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=26.84 KB/27480 for 1d38cbecc23f382ec5e2809846caa111 in 1317ms, sequenceid=480, compaction requested=true 2024-11-20T22:26:28,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:28,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 
2024-11-20T22:26:28,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6365a1e51efd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174
2024-11-20T22:26:28,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster(4106): Remote procedure done, pid=174
2024-11-20T22:26:28,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173
2024-11-20T22:26:28,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3850 sec
2024-11-20T22:26:28,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 3.3880 sec
2024-11-20T22:26:28,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-11-20T22:26:28,824 INFO [Thread-2581 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed
2024-11-20T22:26:35,091 DEBUG [Thread-2577 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x722e4d03 to 127.0.0.1:51916
2024-11-20T22:26:35,091 DEBUG [Thread-2577 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 3
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4303
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4249
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4247
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4360
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4221
2024-11-20T22:26:35,094 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T22:26:35,095 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T22:26:35,095 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ae9fdd3 to 127.0.0.1:51916
2024-11-20T22:26:35,095 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T22:26:35,104 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T22:26:35,110 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T22:26:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T22:26:35,143 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141595143"}]},"ts":"1732141595143"}
2024-11-20T22:26:35,145 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T22:26:35,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175
2024-11-20T22:26:35,159 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T22:26:35,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T22:26:35,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, UNASSIGN}]
2024-11-20T22:26:35,162 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, UNASSIGN
2024-11-20T22:26:35,163 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=CLOSING, regionLocation=6365a1e51efd,46811,1732141422048
2024-11-20T22:26:35,164 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-20T22:26:35,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; CloseRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048}]
2024-11-20T22:26:35,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175
2024-11-20T22:26:35,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6365a1e51efd,46811,1732141422048
2024-11-20T22:26:35,317 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 1d38cbecc23f382ec5e2809846caa111
2024-11-20T22:26:35,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-11-20T22:26:35,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 1d38cbecc23f382ec5e2809846caa111, disabling compactions & flushes
2024-11-20T22:26:35,317 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.
2024-11-20T22:26:35,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:35,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. after waiting 0 ms 2024-11-20T22:26:35,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:35,317 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(2837): Flushing 1d38cbecc23f382ec5e2809846caa111 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T22:26:35,317 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=A 2024-11-20T22:26:35,318 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:35,318 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=B 2024-11-20T22:26:35,318 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:35,318 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1d38cbecc23f382ec5e2809846caa111, store=C 2024-11-20T22:26:35,318 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T22:26:35,326 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206e758d3e4ced436aa18b5c322276e827_1d38cbecc23f382ec5e2809846caa111 is 50, key is test_row_1/A:col10/1732141595084/Put/seqid=0 2024-11-20T22:26:35,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742549_1725 (size=9914) 2024-11-20T22:26:35,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T22:26:35,737 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T22:26:35,742 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206e758d3e4ced436aa18b5c322276e827_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206e758d3e4ced436aa18b5c322276e827_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:35,743 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b2b40e015ad84c9c973f77d9760747f7, store: [table=TestAcidGuarantees family=A region=1d38cbecc23f382ec5e2809846caa111] 2024-11-20T22:26:35,744 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b2b40e015ad84c9c973f77d9760747f7 is 175, key is test_row_1/A:col10/1732141595084/Put/seqid=0 2024-11-20T22:26:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742550_1726 (size=22561) 2024-11-20T22:26:35,748 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=488, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b2b40e015ad84c9c973f77d9760747f7 2024-11-20T22:26:35,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T22:26:35,754 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/a5003614c671498d8b548ada7d2d1002 is 50, key is test_row_1/B:col10/1732141595084/Put/seqid=0 2024-11-20T22:26:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742551_1727 (size=9857) 2024-11-20T22:26:36,158 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/a5003614c671498d8b548ada7d2d1002 2024-11-20T22:26:36,163 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d7cf7ddac75d47609cc92031261045eb is 50, key is test_row_1/C:col10/1732141595084/Put/seqid=0 
2024-11-20T22:26:36,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742552_1728 (size=9857) 2024-11-20T22:26:36,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T22:26:36,566 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d7cf7ddac75d47609cc92031261045eb 2024-11-20T22:26:36,570 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/A/b2b40e015ad84c9c973f77d9760747f7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b2b40e015ad84c9c973f77d9760747f7 2024-11-20T22:26:36,574 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b2b40e015ad84c9c973f77d9760747f7, entries=100, sequenceid=488, filesize=22.0 K 2024-11-20T22:26:36,574 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/B/a5003614c671498d8b548ada7d2d1002 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5003614c671498d8b548ada7d2d1002 2024-11-20T22:26:36,577 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5003614c671498d8b548ada7d2d1002, entries=100, sequenceid=488, filesize=9.6 K 2024-11-20T22:26:36,577 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/.tmp/C/d7cf7ddac75d47609cc92031261045eb as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d7cf7ddac75d47609cc92031261045eb 2024-11-20T22:26:36,580 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d7cf7ddac75d47609cc92031261045eb, entries=100, 
sequenceid=488, filesize=9.6 K 2024-11-20T22:26:36,580 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 1d38cbecc23f382ec5e2809846caa111 in 1263ms, sequenceid=488, compaction requested=true 2024-11-20T22:26:36,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ab11ff95fc4b48dbb8d33555f2ed98b1, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/66646e129ec9488397903b651e13d29f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/18bab7be2dd44b439813bf34248140f9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f18d1b319f7a4fb8a4f110def36c73d3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f41423be1a1e4773938b6ccfdf869166, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/5d17da605ef24ad788d6d5f2c3164311, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ddf66450ac5f4b76bbb62a1084edcf2c, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2f49b64b5079488bb32412df6a79b6af, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/9672830dbaf34687b58b28833e30ae9e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6] to archive 2024-11-20T22:26:36,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T22:26:36,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2275c30e30cb45b083ff43318caff0cc 2024-11-20T22:26:36,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0718c620ccdd4280aac0b7a7f47bbd8e 2024-11-20T22:26:36,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/7e9e6828ff7e40d199be25e54f71ee8b 2024-11-20T22:26:36,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ab11ff95fc4b48dbb8d33555f2ed98b1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ab11ff95fc4b48dbb8d33555f2ed98b1 2024-11-20T22:26:36,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47b820fcf1404a51adeef205ee02ff1d 2024-11-20T22:26:36,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ee884b77acb045df9e37359729db9d40 2024-11-20T22:26:36,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/66646e129ec9488397903b651e13d29f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/66646e129ec9488397903b651e13d29f 2024-11-20T22:26:36,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8ac3e4f3c1844bbe8575ab5b40a502a8 2024-11-20T22:26:36,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e2a60e9704434218adb124cb277401a3 2024-11-20T22:26:36,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/18bab7be2dd44b439813bf34248140f9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/18bab7be2dd44b439813bf34248140f9 2024-11-20T22:26:36,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/8f72332cce07408085c59b0d5fa2a10c 2024-11-20T22:26:36,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/94043d1049f34c278252c61e69d0f3f0 2024-11-20T22:26:36,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/0fe50a8a090f4693a3f29aae68cd9ad6 2024-11-20T22:26:36,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f18d1b319f7a4fb8a4f110def36c73d3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f18d1b319f7a4fb8a4f110def36c73d3 2024-11-20T22:26:36,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/4f675db09f5a45458f38724e5828e737 2024-11-20T22:26:36,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f012ea9a96cf418f98986dbafbcd2449 2024-11-20T22:26:36,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f41423be1a1e4773938b6ccfdf869166 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f41423be1a1e4773938b6ccfdf869166 2024-11-20T22:26:36,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/a56ebe9bf18649238fb1bd7ab76b9a66 2024-11-20T22:26:36,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/cd96416ff4f04e1185c34649d53c6086 2024-11-20T22:26:36,597 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/f4cdec10fa904e6fb5c2a375446bbc60 2024-11-20T22:26:36,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/5d17da605ef24ad788d6d5f2c3164311 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/5d17da605ef24ad788d6d5f2c3164311 2024-11-20T22:26:36,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/217947a2f8fd49d2ab0c5ff4b4bfd38c 2024-11-20T22:26:36,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ddf66450ac5f4b76bbb62a1084edcf2c to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ddf66450ac5f4b76bbb62a1084edcf2c 2024-11-20T22:26:36,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/3433f38637cc4638ad8200c3cc8fdc76 2024-11-20T22:26:36,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/c51c80f28e3e4ec58314cc8010b9c9f2 2024-11-20T22:26:36,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/e4e18dcc2ca84a63b7e4b9a699b5fd18 2024-11-20T22:26:36,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2f49b64b5079488bb32412df6a79b6af to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/2f49b64b5079488bb32412df6a79b6af 2024-11-20T22:26:36,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b074b1b5422744868685288b2c9aee5f 2024-11-20T22:26:36,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/55b9a12e6af249c9839cc4731de3a2e6 2024-11-20T22:26:36,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/9672830dbaf34687b58b28833e30ae9e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/9672830dbaf34687b58b28833e30ae9e 2024-11-20T22:26:36,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/101f853f897d4d45a27e3fc811d223be 2024-11-20T22:26:36,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/ba48dccf97d5405e95f4bdbcb66b55a6 2024-11-20T22:26:36,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/1d1f1733451f4f0ab7f72f2a5c42efab, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/db2b124e55df4db7805de73a41bf021e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e3925339933e4d769a23ca2389ac4c3f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/26a996f5a8694d539de69e2c13a01149, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/974dc958715f49fc9b6a8817eb2fb31b, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d619563bbb2c49c6950bb8aa7dd335af, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/23b76ea63d114240b8986ac04676619a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e4ecebae8e4246178d5103e10d713074, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/92cab52edaec436282660c0d36374ba4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/16404ac2e68b45bc97bae042ef2cad4d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/644f8e4bd4bb4a538faf446c85717f21, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/5d412837fb464756a10e728ddba96d71, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d4b3b31a6b1d4bc69d68d35edd6b8b99, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/ed08c773b6644bfdbba9e56bb8f065b5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/15704e48e2754280b3df5e69cdc93c98, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/652ceb85d12f46ebb881c0c298f2dede, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/71298ff697c249728c331463d673d03f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c96084fa83f64d92991442b9b3310b30, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/89953b46632b48e3aa1425223f97dec0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e06e2fb19c454925be587d6d49dbb523, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/601662cd68f748c3a652219730ffd08f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e59352b827f64be0845276b28aa45500, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/550e3fdbd0b54d368e6697d80a199569, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/046c033ab137491d82b5730b92a0386a, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/656fff6e7e904bd98c4e2ff0f2cf5529, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c85b356f5b904e85b84254fa989b9bce, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c55699d5e5d4beca680b194253b4762, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/0ff1fd796af14180a9d1ebf05b4d69b8, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e421e527477d47f4b07104604b6395b5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/6b0ccdc73c7c40fc82d29675fb77c903, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5ef7072c6714baa90d0929b0284a207] to archive 2024-11-20T22:26:36,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
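[Editor's note] In every HFileArchiver(596) record above and below, the destination mirrors the source: the region/family/file layout is unchanged and only the top-level "data" segment under the cluster root gains an "archive" prefix. A minimal sketch of that mapping follows, under the assumption that a simple string rewrite is enough for illustration; toArchivePath is a hypothetical helper written for this note, not HBase's internal API.

    import java.net.URI;

    // Sketch: derive the archive location shown in the HFileArchiver log lines by
    // inserting "/archive" in front of the "/data/default/" segment of a store-file URI.
    // toArchivePath is a hypothetical illustration helper, not part of HBase.
    public class ArchivePathSketch {

        static URI toArchivePath(URI storeFile) {
            // e.g. .../e440af12-.../data/default/TestAcidGuarantees/<region>/<family>/<file>
            //  ->  .../e440af12-.../archive/data/default/TestAcidGuarantees/<region>/<family>/<file>
            String path = storeFile.getPath();
            int idx = path.indexOf("/data/default/");
            if (idx < 0) {
                throw new IllegalArgumentException("not a store file under data/: " + storeFile);
            }
            String archived = path.substring(0, idx) + "/archive" + path.substring(idx);
            return storeFile.resolve(archived); // keeps the hdfs://host:port authority
        }

        public static void main(String[] args) {
            URI src = URI.create(
                "hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72"
                + "/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959");
            System.out.println(src + "\n  -> " + toArchivePath(src));
        }
    }

Running the sketch on the first B-family file reproduces the source/destination pair logged in the next record.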
2024-11-20T22:26:36,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/fef794012ca84443bb837adb6eab9959 2024-11-20T22:26:36,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/1d1f1733451f4f0ab7f72f2a5c42efab to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/1d1f1733451f4f0ab7f72f2a5c42efab 2024-11-20T22:26:36,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/db2b124e55df4db7805de73a41bf021e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/db2b124e55df4db7805de73a41bf021e 2024-11-20T22:26:36,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e3925339933e4d769a23ca2389ac4c3f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e3925339933e4d769a23ca2389ac4c3f 2024-11-20T22:26:36,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/26a996f5a8694d539de69e2c13a01149 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/26a996f5a8694d539de69e2c13a01149 2024-11-20T22:26:36,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/974dc958715f49fc9b6a8817eb2fb31b to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/974dc958715f49fc9b6a8817eb2fb31b 2024-11-20T22:26:36,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d619563bbb2c49c6950bb8aa7dd335af to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d619563bbb2c49c6950bb8aa7dd335af 2024-11-20T22:26:36,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/23b76ea63d114240b8986ac04676619a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/23b76ea63d114240b8986ac04676619a 2024-11-20T22:26:36,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e4ecebae8e4246178d5103e10d713074 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e4ecebae8e4246178d5103e10d713074 2024-11-20T22:26:36,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/92cab52edaec436282660c0d36374ba4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/92cab52edaec436282660c0d36374ba4 2024-11-20T22:26:36,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/16404ac2e68b45bc97bae042ef2cad4d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/16404ac2e68b45bc97bae042ef2cad4d 2024-11-20T22:26:36,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/644f8e4bd4bb4a538faf446c85717f21 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/644f8e4bd4bb4a538faf446c85717f21 2024-11-20T22:26:36,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/5d412837fb464756a10e728ddba96d71 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/5d412837fb464756a10e728ddba96d71 2024-11-20T22:26:36,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d4b3b31a6b1d4bc69d68d35edd6b8b99 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/d4b3b31a6b1d4bc69d68d35edd6b8b99 2024-11-20T22:26:36,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/ed08c773b6644bfdbba9e56bb8f065b5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/ed08c773b6644bfdbba9e56bb8f065b5 2024-11-20T22:26:36,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/15704e48e2754280b3df5e69cdc93c98 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/15704e48e2754280b3df5e69cdc93c98 2024-11-20T22:26:36,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/652ceb85d12f46ebb881c0c298f2dede to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/652ceb85d12f46ebb881c0c298f2dede 2024-11-20T22:26:36,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/71298ff697c249728c331463d673d03f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/71298ff697c249728c331463d673d03f 2024-11-20T22:26:36,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c96084fa83f64d92991442b9b3310b30 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c96084fa83f64d92991442b9b3310b30 2024-11-20T22:26:36,632 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/89953b46632b48e3aa1425223f97dec0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/89953b46632b48e3aa1425223f97dec0 2024-11-20T22:26:36,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e06e2fb19c454925be587d6d49dbb523 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e06e2fb19c454925be587d6d49dbb523 2024-11-20T22:26:36,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/601662cd68f748c3a652219730ffd08f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/601662cd68f748c3a652219730ffd08f 2024-11-20T22:26:36,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e59352b827f64be0845276b28aa45500 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e59352b827f64be0845276b28aa45500 2024-11-20T22:26:36,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/550e3fdbd0b54d368e6697d80a199569 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/550e3fdbd0b54d368e6697d80a199569 2024-11-20T22:26:36,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/046c033ab137491d82b5730b92a0386a to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/046c033ab137491d82b5730b92a0386a 2024-11-20T22:26:36,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/656fff6e7e904bd98c4e2ff0f2cf5529 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/656fff6e7e904bd98c4e2ff0f2cf5529 2024-11-20T22:26:36,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c85b356f5b904e85b84254fa989b9bce to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/c85b356f5b904e85b84254fa989b9bce 2024-11-20T22:26:36,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c55699d5e5d4beca680b194253b4762 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c55699d5e5d4beca680b194253b4762 2024-11-20T22:26:36,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/0ff1fd796af14180a9d1ebf05b4d69b8 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/0ff1fd796af14180a9d1ebf05b4d69b8 2024-11-20T22:26:36,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e421e527477d47f4b07104604b6395b5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/e421e527477d47f4b07104604b6395b5 2024-11-20T22:26:36,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/6b0ccdc73c7c40fc82d29675fb77c903 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/6b0ccdc73c7c40fc82d29675fb77c903 2024-11-20T22:26:36,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5ef7072c6714baa90d0929b0284a207 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5ef7072c6714baa90d0929b0284a207 2024-11-20T22:26:36,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/42d2ff01a4514ac89670e69880ac39e0, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9a3b59419e3949fb87b8fa4b84a23bc9, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d95aefd3b0e347f19b2d1852b84a2635, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/2dd1eb85746e4da4a3ad389d812fc4f4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/10f91229d4134723aff0a47663e6d4c6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/121dc1e127bf463993b64d1a42a69be6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/4d3ec0b3670b428a91c93855d447548f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/af365d71224f4d728e75cda773ed2889, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9f9507440b294e7a8d5f18bb712e8a29, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/51adb3186946450385d89fb63b023bb4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f9b683d66c0b407cab958c035b8f8b17, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/282864c1250040d0b0115a5552dd7b93, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/203d8480191a47e9a2c44198d3ad12cf, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d05273e6c64a4c6ca5db163758b4078f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/3f3ef7ee800244c890c423095608def5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/267ade1bdf364a32ace9f4ce770e719e, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/0d373eb10d8c439496436238684c985d, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/38897f5cbe8c4138ad59d3a1470972be, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/cd876b76201e4e5dbf7bc313808d7af6, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/bbd6513ff2ec41a391c2e45783f7ab78, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/df909c141f6041cab013a5486e992ac4, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/c11ab57cdf4545008d4a453dcfc89dbb, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d616fa2bd11e45f19cd26f01005e1fb5, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/8b3cf8c29fa147e292d0eca94cf452df, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/daee59dae57f4333bf9aa595f87fe0c3, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6a61896684b24c6ebc6a03fda5073f0d, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/1fae6f77210d463498d5b3f8681f231f, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/67fa77d214494f09bbdb059976ca2210, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/5a6dc9f69ee4442185fcd46ee8cfc180, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6489735779764ef5af341ffb83ca0f65, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/eb215ab41b6e4f68969c2ce6cdbcf058, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/e5a8dc9743004e09b4bf873685520e03] to archive 2024-11-20T22:26:36,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
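[Editor's note] Once the records below complete, every compacted C-family HFile listed above sits under archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C. A short, hedged verification sketch using the standard Hadoop FileSystem client is shown here; the NameNode address and paths are taken from the log, and the class is an illustration aid, not part of the test itself.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch: list the HFiles the StoreCloser archived for one column family,
    // using the plain Hadoop FileSystem API. Host/port and paths come from the log above.
    public class ListArchivedStoreFiles {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46027"), conf);
            Path familyArchive = new Path(
                "/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72"
                + "/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C");
            for (FileStatus status : fs.listStatus(familyArchive)) {
                System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
            }
            fs.close();
        }
    }

Archived files are retained under archive/ until the master's HFile cleaner chores decide they are no longer referenced, so such a listing is expected to succeed immediately after the region close seen in the following records.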
2024-11-20T22:26:36,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/42d2ff01a4514ac89670e69880ac39e0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/42d2ff01a4514ac89670e69880ac39e0 2024-11-20T22:26:36,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9a3b59419e3949fb87b8fa4b84a23bc9 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9a3b59419e3949fb87b8fa4b84a23bc9 2024-11-20T22:26:36,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d95aefd3b0e347f19b2d1852b84a2635 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d95aefd3b0e347f19b2d1852b84a2635 2024-11-20T22:26:36,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/2dd1eb85746e4da4a3ad389d812fc4f4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/2dd1eb85746e4da4a3ad389d812fc4f4 2024-11-20T22:26:36,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/10f91229d4134723aff0a47663e6d4c6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/10f91229d4134723aff0a47663e6d4c6 2024-11-20T22:26:36,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/121dc1e127bf463993b64d1a42a69be6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/121dc1e127bf463993b64d1a42a69be6 2024-11-20T22:26:36,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/4d3ec0b3670b428a91c93855d447548f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/4d3ec0b3670b428a91c93855d447548f 2024-11-20T22:26:36,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/af365d71224f4d728e75cda773ed2889 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/af365d71224f4d728e75cda773ed2889 2024-11-20T22:26:36,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9f9507440b294e7a8d5f18bb712e8a29 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/9f9507440b294e7a8d5f18bb712e8a29 2024-11-20T22:26:36,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/51adb3186946450385d89fb63b023bb4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/51adb3186946450385d89fb63b023bb4 2024-11-20T22:26:36,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f9b683d66c0b407cab958c035b8f8b17 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f9b683d66c0b407cab958c035b8f8b17 2024-11-20T22:26:36,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/282864c1250040d0b0115a5552dd7b93 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/282864c1250040d0b0115a5552dd7b93 2024-11-20T22:26:36,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/203d8480191a47e9a2c44198d3ad12cf to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/203d8480191a47e9a2c44198d3ad12cf 2024-11-20T22:26:36,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d05273e6c64a4c6ca5db163758b4078f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d05273e6c64a4c6ca5db163758b4078f 2024-11-20T22:26:36,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/3f3ef7ee800244c890c423095608def5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/3f3ef7ee800244c890c423095608def5 2024-11-20T22:26:36,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/267ade1bdf364a32ace9f4ce770e719e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/267ade1bdf364a32ace9f4ce770e719e 2024-11-20T22:26:36,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/0d373eb10d8c439496436238684c985d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/0d373eb10d8c439496436238684c985d 2024-11-20T22:26:36,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/38897f5cbe8c4138ad59d3a1470972be to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/38897f5cbe8c4138ad59d3a1470972be 2024-11-20T22:26:36,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/cd876b76201e4e5dbf7bc313808d7af6 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/cd876b76201e4e5dbf7bc313808d7af6 2024-11-20T22:26:36,673 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/bbd6513ff2ec41a391c2e45783f7ab78 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/bbd6513ff2ec41a391c2e45783f7ab78 2024-11-20T22:26:36,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/df909c141f6041cab013a5486e992ac4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/df909c141f6041cab013a5486e992ac4 2024-11-20T22:26:36,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/c11ab57cdf4545008d4a453dcfc89dbb to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/c11ab57cdf4545008d4a453dcfc89dbb 2024-11-20T22:26:36,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d616fa2bd11e45f19cd26f01005e1fb5 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d616fa2bd11e45f19cd26f01005e1fb5 2024-11-20T22:26:36,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/8b3cf8c29fa147e292d0eca94cf452df to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/8b3cf8c29fa147e292d0eca94cf452df 2024-11-20T22:26:36,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/daee59dae57f4333bf9aa595f87fe0c3 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/daee59dae57f4333bf9aa595f87fe0c3 2024-11-20T22:26:36,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6a61896684b24c6ebc6a03fda5073f0d to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6a61896684b24c6ebc6a03fda5073f0d 2024-11-20T22:26:36,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/1fae6f77210d463498d5b3f8681f231f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/1fae6f77210d463498d5b3f8681f231f 2024-11-20T22:26:36,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/67fa77d214494f09bbdb059976ca2210 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/67fa77d214494f09bbdb059976ca2210 2024-11-20T22:26:36,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/5a6dc9f69ee4442185fcd46ee8cfc180 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/5a6dc9f69ee4442185fcd46ee8cfc180 2024-11-20T22:26:36,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6489735779764ef5af341ffb83ca0f65 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/6489735779764ef5af341ffb83ca0f65 2024-11-20T22:26:36,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/eb215ab41b6e4f68969c2ce6cdbcf058 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/eb215ab41b6e4f68969c2ce6cdbcf058 2024-11-20T22:26:36,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/e5a8dc9743004e09b4bf873685520e03 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/e5a8dc9743004e09b4bf873685520e03 2024-11-20T22:26:36,687 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/recovered.edits/491.seqid, newMaxSeqId=491, maxSeqId=4 2024-11-20T22:26:36,688 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111. 2024-11-20T22:26:36,688 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 1d38cbecc23f382ec5e2809846caa111: 2024-11-20T22:26:36,689 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:36,690 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=1d38cbecc23f382ec5e2809846caa111, regionState=CLOSED 2024-11-20T22:26:36,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-20T22:26:36,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseRegionProcedure 1d38cbecc23f382ec5e2809846caa111, server=6365a1e51efd,46811,1732141422048 in 1.5260 sec 2024-11-20T22:26:36,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-11-20T22:26:36,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1d38cbecc23f382ec5e2809846caa111, UNASSIGN in 1.5300 sec 2024-11-20T22:26:36,694 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-20T22:26:36,694 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5330 sec 2024-11-20T22:26:36,695 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732141596695"}]},"ts":"1732141596695"} 2024-11-20T22:26:36,696 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T22:26:36,704 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T22:26:36,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5940 sec 2024-11-20T22:26:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T22:26:37,253 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-20T22:26:37,254 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T22:26:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:37,255 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T22:26:37,256 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:37,258 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,260 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C, FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/recovered.edits] 2024-11-20T22:26:37,262 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47ce5123b3e8429081e47711e9dd07fc to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/47ce5123b3e8429081e47711e9dd07fc 2024-11-20T22:26:37,262 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/939148004f9944a885faa2657ee11ab4 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/939148004f9944a885faa2657ee11ab4 2024-11-20T22:26:37,263 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b2b40e015ad84c9c973f77d9760747f7 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b2b40e015ad84c9c973f77d9760747f7 2024-11-20T22:26:37,264 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b64b748be6834b98b2b65367179be076 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/A/b64b748be6834b98b2b65367179be076 2024-11-20T22:26:37,266 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/2460c22ce31346c4af06afbd978288e0 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/2460c22ce31346c4af06afbd978288e0 2024-11-20T22:26:37,267 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c1b218e340a4646acf8710236df08e1 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/4c1b218e340a4646acf8710236df08e1 2024-11-20T22:26:37,268 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5003614c671498d8b548ada7d2d1002 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/a5003614c671498d8b548ada7d2d1002 2024-11-20T22:26:37,268 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/b5992033dbec40b59ec0c3d9a57e0607 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/B/b5992033dbec40b59ec0c3d9a57e0607 2024-11-20T22:26:37,270 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/264c4e61cd0741c792ab35dab9c98b4f to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/264c4e61cd0741c792ab35dab9c98b4f 2024-11-20T22:26:37,271 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/b0bb14353db246d5b5eb8b2bd897788e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/b0bb14353db246d5b5eb8b2bd897788e 2024-11-20T22:26:37,272 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d7cf7ddac75d47609cc92031261045eb to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/d7cf7ddac75d47609cc92031261045eb 2024-11-20T22:26:37,273 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f97dfb3488a541378fc27964836a5d0e to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/C/f97dfb3488a541378fc27964836a5d0e 2024-11-20T22:26:37,275 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/recovered.edits/491.seqid to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111/recovered.edits/491.seqid 2024-11-20T22:26:37,276 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/default/TestAcidGuarantees/1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,276 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T22:26:37,276 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:26:37,277 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T22:26:37,280 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120008e1ecf3f514b9dbfd4052556802f87_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120008e1ecf3f514b9dbfd4052556802f87_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,280 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201418b487e40647fe80a31f7a5828e3ea_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201418b487e40647fe80a31f7a5828e3ea_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,281 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120338fb5fe3e1d4d99b45e9c218c269f51_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120338fb5fe3e1d4d99b45e9c218c269f51_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,282 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203cbc40dc516b44dda5f9a0b20e4087be_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203cbc40dc516b44dda5f9a0b20e4087be_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,283 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120506fb630728248d8bae7ee2e867a2a8a_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120506fb630728248d8bae7ee2e867a2a8a_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,284 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120513698f9d050427190db8574d5644c10_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120513698f9d050427190db8574d5644c10_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,285 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120548a9d8ff77945069fa2c22982d44073_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120548a9d8ff77945069fa2c22982d44073_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,286 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120557b55f47a254e4db86da6e24c77d205_1d38cbecc23f382ec5e2809846caa111 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120557b55f47a254e4db86da6e24c77d205_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,287 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e22a67813294222beb34c5f9105898a_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e22a67813294222beb34c5f9105898a_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,288 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e71f0f7222a4b8dbac3f9d9a8e93443_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e71f0f7222a4b8dbac3f9d9a8e93443_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,289 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206e758d3e4ced436aa18b5c322276e827_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206e758d3e4ced436aa18b5c322276e827_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,289 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120722043a241414522a74dc51807eba0ea_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120722043a241414522a74dc51807eba0ea_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,290 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c0c64dc4e1e4a27992b98a1bb692f59_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c0c64dc4e1e4a27992b98a1bb692f59_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,291 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120966e32f0e10b41f2b6f0fdea7b98ac67_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120966e32f0e10b41f2b6f0fdea7b98ac67_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,291 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209aa9f972b97e420bb17407c1adc1e47d_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209aa9f972b97e420bb17407c1adc1e47d_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,292 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aeedaa6c1ef14b9cbb660e83fe953860_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aeedaa6c1ef14b9cbb660e83fe953860_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,293 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af0fbeb0be074e2aab27075ed1ed61db_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af0fbeb0be074e2aab27075ed1ed61db_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,294 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c0d040d3441946e29aacca5ffbdff998_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c0d040d3441946e29aacca5ffbdff998_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,294 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cb54e2971d994205bb0337a7d113754e_1d38cbecc23f382ec5e2809846caa111 to 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cb54e2971d994205bb0337a7d113754e_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,295 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cb8f2d4bc0d54434aee859dfb0777d0c_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cb8f2d4bc0d54434aee859dfb0777d0c_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,296 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d9e4888f0e7945ba988fda2a5201711b_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d9e4888f0e7945ba988fda2a5201711b_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,297 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120db68cf0fb614443b808df371b759c48c_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120db68cf0fb614443b808df371b759c48c_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,297 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e5d9989b9dbe4c939411b1e183c4fb9f_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e5d9989b9dbe4c939411b1e183c4fb9f_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,298 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e6b9fdf9122d4f35995358bfe057fef9_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e6b9fdf9122d4f35995358bfe057fef9_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,299 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f0943ba7d82f4f90bf5d47f546a5bcad_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f0943ba7d82f4f90bf5d47f546a5bcad_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,300 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9b58a188d4e4488a2d9334253c32193_1d38cbecc23f382ec5e2809846caa111 to hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9b58a188d4e4488a2d9334253c32193_1d38cbecc23f382ec5e2809846caa111 2024-11-20T22:26:37,300 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T22:26:37,302 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:37,303 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T22:26:37,305 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T22:26:37,306 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:37,306 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T22:26:37,306 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732141597306"}]},"ts":"9223372036854775807"} 2024-11-20T22:26:37,307 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T22:26:37,307 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1d38cbecc23f382ec5e2809846caa111, NAME => 'TestAcidGuarantees,,1732141564119.1d38cbecc23f382ec5e2809846caa111.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T22:26:37,307 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
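The disable and delete recorded above are driven from the test client through the standard HBase Admin API: the master runs a DisableTableProcedure (pid=175) and then a DeleteTableProcedure (pid=179), which archives the region's store and MOB files before removing the table from hbase:meta. A minimal client-side sketch of that call pattern, assuming an HBase 2.x client on the classpath; the class name DropTestTable is illustrative and only the table name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          // Master runs a DisableTableProcedure (pid=175 in the log above)
          admin.disableTable(table);
        }
        // Master runs a DeleteTableProcedure (pid=179): region store files and
        // MOB files are archived, then the table is removed from hbase:meta
        admin.deleteTable(table);
      }
    }
  }
}

Note that the procedure does not remove data outright: the HFileArchiver lines above show each store file being moved from the data/default/TestAcidGuarantees tree into the parallel archive/data/default/TestAcidGuarantees tree, from where a cleaner chore can remove it later.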
2024-11-20T22:26:37,307 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732141597307"}]},"ts":"9223372036854775807"} 2024-11-20T22:26:37,309 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T22:26:37,318 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T22:26:37,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 64 msec 2024-11-20T22:26:37,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35073 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T22:26:37,357 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-20T22:26:37,367 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237 (was 238), OpenFileDescriptor=451 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1113 (was 1102) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1316 (was 866) - AvailableMemoryMB LEAK? - 2024-11-20T22:26:37,367 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T22:26:37,367 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T22:26:37,367 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d6cd448 to 127.0.0.1:51916 2024-11-20T22:26:37,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:37,368 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T22:26:37,368 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=957708405, stopped=false 2024-11-20T22:26:37,368 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=6365a1e51efd,35073,1732141420438 2024-11-20T22:26:37,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T22:26:37,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T22:26:37,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:26:37,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:26:37,376 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T22:26:37,377 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:37,377 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6365a1e51efd,46811,1732141422048' ***** 2024-11-20T22:26:37,377 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T22:26:37,377 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T22:26:37,377 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T22:26:37,377 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T22:26:37,378 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(3579): Received CLOSE for 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1224): stopping server 6365a1e51efd,46811,1732141422048 2024-11-20T22:26:37,378 DEBUG [RS:0;6365a1e51efd:46811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T22:26:37,378 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T22:26:37,379 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 523fbb796d2a39aa16176c6f447c7951, disabling compactions & flushes 2024-11-20T22:26:37,379 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. after waiting 0 ms 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 
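The ResourceChecker summary a few entries above (Thread=237 (was 238), OpenFileDescriptor=451 (was 449), and so on) is the test harness comparing before/after snapshots of process resources to flag leaks. For illustration only, a comparable snapshot can be taken on a Linux/HotSpot JVM via the com.sun.management extension; this sketch is not the ResourceChecker implementation and the class name is made up:

import java.lang.management.ManagementFactory;
import com.sun.management.UnixOperatingSystemMXBean;

public class ResourceSnapshot {
  public static void main(String[] args) {
    // Counts comparable to the "Thread=..., OpenFileDescriptor=..., MaxFileDescriptor=..." fields above
    UnixOperatingSystemMXBean os =
        (UnixOperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean();
    System.out.println("Thread=" + Thread.activeCount()
        + ", OpenFileDescriptor=" + os.getOpenFileDescriptorCount()
        + ", MaxFileDescriptor=" + os.getMaxFileDescriptorCount());
  }
}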
2024-11-20T22:26:37,379 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T22:26:37,379 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 523fbb796d2a39aa16176c6f447c7951 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T22:26:37,379 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 523fbb796d2a39aa16176c6f447c7951=hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951.} 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T22:26:37,379 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T22:26:37,379 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T22:26:37,379 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T22:26:37,379 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:26:37,393 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/.tmp/info/a6d648be78484bb5aacc7ed53fdaae6e is 45, key is default/info:d/1732141426343/Put/seqid=0 2024-11-20T22:26:37,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742553_1729 (size=5037) 2024-11-20T22:26:37,397 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/info/dd5281a1acc3444bb3de2e7d68f4bf9c is 143, key is hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951./info:regioninfo/1732141426183/Put/seqid=0 2024-11-20T22:26:37,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742554_1730 (size=7725) 2024-11-20T22:26:37,423 INFO [regionserver/6365a1e51efd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T22:26:37,423 INFO [regionserver/6365a1e51efd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T22:26:37,468 INFO [regionserver/6365a1e51efd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T22:26:37,580 DEBUG [RS:0;6365a1e51efd:46811 {}] 
regionserver.HRegionServer(1629): Waiting on 1588230740, 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:26:37,780 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 523fbb796d2a39aa16176c6f447c7951 2024-11-20T22:26:37,796 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/.tmp/info/a6d648be78484bb5aacc7ed53fdaae6e 2024-11-20T22:26:37,799 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/.tmp/info/a6d648be78484bb5aacc7ed53fdaae6e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/info/a6d648be78484bb5aacc7ed53fdaae6e 2024-11-20T22:26:37,802 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/info/a6d648be78484bb5aacc7ed53fdaae6e, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T22:26:37,803 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 523fbb796d2a39aa16176c6f447c7951 in 423ms, sequenceid=6, compaction requested=false 2024-11-20T22:26:37,803 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/info/dd5281a1acc3444bb3de2e7d68f4bf9c 2024-11-20T22:26:37,805 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/namespace/523fbb796d2a39aa16176c6f447c7951/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T22:26:37,806 INFO [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 2024-11-20T22:26:37,806 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 523fbb796d2a39aa16176c6f447c7951: 2024-11-20T22:26:37,806 DEBUG [RS_CLOSE_REGION-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732141425562.523fbb796d2a39aa16176c6f447c7951. 
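This shutdown sequence, closing hbase:namespace and hbase:meta, flushing their memstores and waiting for the regions to finish closing, is what the test harness produces when it tears down its single-node cluster ("Shutting down minicluster" above). A minimal sketch of that lifecycle, assuming the HBaseTestingUtility API from hbase-testing-util; the workload placeholder is not taken from the test source:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycle {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // One master plus one region server, matching the single RS seen in this log
    util.startMiniCluster(1);
    try {
      // ... run the test workload against util.getConnection() ...
    } finally {
      // Triggers the "Shutting down minicluster" sequence above: regions are
      // flushed and closed, WALs archived, then region server and master stop
      util.shutdownMiniCluster();
    }
  }
}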
2024-11-20T22:26:37,819 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/rep_barrier/6a5d9a51649b43728ddb7c8c1089a90e is 102, key is TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90./rep_barrier:/1732141454277/DeleteFamily/seqid=0 2024-11-20T22:26:37,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742555_1731 (size=6025) 2024-11-20T22:26:37,980 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T22:26:38,181 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T22:26:38,223 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/rep_barrier/6a5d9a51649b43728ddb7c8c1089a90e 2024-11-20T22:26:38,247 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/table/e81d2c47a5fa4f1fbb8d1cbdf48044a7 is 96, key is TestAcidGuarantees,,1732141426656.175bc25ef8aacc6207ddcddcc7da4d90./table:/1732141454277/DeleteFamily/seqid=0 2024-11-20T22:26:38,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742556_1732 (size=5942) 2024-11-20T22:26:38,381 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-20T22:26:38,381 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T22:26:38,381 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T22:26:38,581 DEBUG [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T22:26:38,651 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/table/e81d2c47a5fa4f1fbb8d1cbdf48044a7 2024-11-20T22:26:38,654 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/info/dd5281a1acc3444bb3de2e7d68f4bf9c as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/info/dd5281a1acc3444bb3de2e7d68f4bf9c 2024-11-20T22:26:38,658 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/info/dd5281a1acc3444bb3de2e7d68f4bf9c, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T22:26:38,658 DEBUG 
[RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/rep_barrier/6a5d9a51649b43728ddb7c8c1089a90e as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/rep_barrier/6a5d9a51649b43728ddb7c8c1089a90e 2024-11-20T22:26:38,664 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/rep_barrier/6a5d9a51649b43728ddb7c8c1089a90e, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T22:26:38,665 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/.tmp/table/e81d2c47a5fa4f1fbb8d1cbdf48044a7 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/table/e81d2c47a5fa4f1fbb8d1cbdf48044a7 2024-11-20T22:26:38,668 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/table/e81d2c47a5fa4f1fbb8d1cbdf48044a7, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T22:26:38,668 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1289ms, sequenceid=93, compaction requested=false 2024-11-20T22:26:38,673 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T22:26:38,674 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T22:26:38,674 INFO [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T22:26:38,674 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T22:26:38,674 DEBUG [RS_CLOSE_META-regionserver/6365a1e51efd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T22:26:38,781 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1250): stopping server 6365a1e51efd,46811,1732141422048; all regions closed. 
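Each flush above writes the store file into the region's .tmp directory and then commits it into the per-family directory (info/, rep_barrier/ and table/ for hbase:meta, region 1588230740). For orientation, the resulting on-disk layout can be inspected with the plain Hadoop FileSystem API; the NameNode address and paths below are copied from the log, and the code is an illustrative sketch, not part of the test:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListMetaStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46027"), conf);
    // hbase:meta region directory from the log; family directories sit alongside
    // region metadata such as .regioninfo and recovered.edits
    Path metaRegion = new Path(
        "/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/data/hbase/meta/1588230740");
    for (FileStatus entry : fs.listStatus(metaRegion)) {
      if (!entry.isDirectory()) {
        continue; // skip plain files like .regioninfo at this level
      }
      for (FileStatus storeFile : fs.listStatus(entry.getPath())) {
        System.out.println(entry.getPath().getName() + "/" + storeFile.getPath().getName()
            + " (" + storeFile.getLen() + " bytes)");
      }
    }
    fs.close();
  }
}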
2024-11-20T22:26:38,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741834_1010 (size=26050) 2024-11-20T22:26:38,986 DEBUG [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/oldWALs 2024-11-20T22:26:38,986 INFO [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6365a1e51efd%2C46811%2C1732141422048.meta:.meta(num 1732141425118) 2024-11-20T22:26:38,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741833_1009 (size=16438710) 2024-11-20T22:26:39,004 DEBUG [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/oldWALs 2024-11-20T22:26:39,004 INFO [RS:0;6365a1e51efd:46811 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6365a1e51efd%2C46811%2C1732141422048:(num 1732141424595) 2024-11-20T22:26:39,004 DEBUG [RS:0;6365a1e51efd:46811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:39,004 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T22:26:39,004 INFO [RS:0;6365a1e51efd:46811 {}] hbase.ChoreService(370): Chore service for: regionserver/6365a1e51efd:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T22:26:39,005 INFO [regionserver/6365a1e51efd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T22:26:39,005 INFO [RS:0;6365a1e51efd:46811 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46811 2024-11-20T22:26:39,163 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T22:26:39,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T22:26:39,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6365a1e51efd,46811,1732141422048 2024-11-20T22:26:39,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6365a1e51efd,46811,1732141422048] 2024-11-20T22:26:39,375 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6365a1e51efd,46811,1732141422048; numProcessing=1 2024-11-20T22:26:39,393 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6365a1e51efd,46811,1732141422048 already deleted, retry=false 2024-11-20T22:26:39,393 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6365a1e51efd,46811,1732141422048 expired; onlineServers=0 2024-11-20T22:26:39,393 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6365a1e51efd,35073,1732141420438' ***** 2024-11-20T22:26:39,393 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T22:26:39,393 DEBUG [M:0;6365a1e51efd:35073 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784d5c19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6365a1e51efd/172.17.0.2:0 2024-11-20T22:26:39,393 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegionServer(1224): stopping server 6365a1e51efd,35073,1732141420438 2024-11-20T22:26:39,394 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegionServer(1250): stopping server 6365a1e51efd,35073,1732141420438; all regions closed. 2024-11-20T22:26:39,394 DEBUG [M:0;6365a1e51efd:35073 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T22:26:39,394 DEBUG [M:0;6365a1e51efd:35073 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T22:26:39,394 DEBUG [M:0;6365a1e51efd:35073 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T22:26:39,394 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
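The master learns of the region server's departure through ZooKeeper: the server's ephemeral znode under /hbase/rs vanishes, and the NodeDeleted event logged just above is what RegionServerTracker turns into the expiration processing that follows. A bare-bones sketch of watching such a znode with the plain ZooKeeper client, reusing the quorum address and server name from the log; this illustrates the mechanism only and is not HBase's tracker code:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchRegionServerZnode {
  public static void main(String[] args) throws Exception {
    String znode = "/hbase/rs/6365a1e51efd,46811,1732141422048"; // ephemeral node from the log
    CountDownLatch deleted = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51916", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && znode.equals(event.getPath())) {
        // This is the event RegionServerTracker reacts to ("processing expiration")
        deleted.countDown();
      }
    });
    zk.exists(znode, true); // one-shot watch via the default watcher
    deleted.await();
    System.out.println("region server ephemeral node deleted: " + znode);
    zk.close();
  }
}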
2024-11-20T22:26:39,394 DEBUG [master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.small.0-1732141424219 {}] cleaner.HFileCleaner(306): Exit Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.small.0-1732141424219,5,FailOnTimeoutGroup] 2024-11-20T22:26:39,394 DEBUG [master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.large.0-1732141424217 {}] cleaner.HFileCleaner(306): Exit Thread[master/6365a1e51efd:0:becomeActiveMaster-HFileCleaner.large.0-1732141424217,5,FailOnTimeoutGroup] 2024-11-20T22:26:39,394 INFO [M:0;6365a1e51efd:35073 {}] hbase.ChoreService(370): Chore service for: master/6365a1e51efd:0 had [] on shutdown 2024-11-20T22:26:39,395 DEBUG [M:0;6365a1e51efd:35073 {}] master.HMaster(1733): Stopping service threads 2024-11-20T22:26:39,395 INFO [M:0;6365a1e51efd:35073 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T22:26:39,395 ERROR [M:0;6365a1e51efd:35073 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (618324300) connection to localhost/127.0.0.1:46027 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:46027,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-20T22:26:39,396 INFO [M:0;6365a1e51efd:35073 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T22:26:39,396 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T22:26:39,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T22:26:39,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T22:26:39,410 DEBUG [M:0;6365a1e51efd:35073 {}] zookeeper.ZKUtil(347): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T22:26:39,410 WARN [M:0;6365a1e51efd:35073 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T22:26:39,410 INFO [M:0;6365a1e51efd:35073 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-20T22:26:39,410 INFO [M:0;6365a1e51efd:35073 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T22:26:39,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T22:26:39,411 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T22:26:39,411 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T22:26:39,411 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:26:39,411 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T22:26:39,411 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T22:26:39,411 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=816.49 KB heapSize=1007.52 KB 2024-11-20T22:26:39,440 DEBUG [M:0;6365a1e51efd:35073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc66a90d0a747898484bdc5b7b86c40 is 82, key is hbase:meta,,1/info:regioninfo/1732141425339/Put/seqid=0 2024-11-20T22:26:39,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742557_1733 (size=5672) 2024-11-20T22:26:39,443 INFO [M:0;6365a1e51efd:35073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2372 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc66a90d0a747898484bdc5b7b86c40 2024-11-20T22:26:39,474 DEBUG [M:0;6365a1e51efd:35073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b18a91e811324ed48c1cf31f092b3462 is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x98/proc:d/1732141566233/Put/seqid=0 2024-11-20T22:26:39,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T22:26:39,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46811-0x1015ba22db50001, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T22:26:39,475 INFO [RS:0;6365a1e51efd:46811 {}] regionserver.HRegionServer(1307): Exiting; stopping=6365a1e51efd,46811,1732141422048; zookeeper connection closed. 
2024-11-20T22:26:39,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1099bf27 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1099bf27
2024-11-20T22:26:39,475 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-20T22:26:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742558_1734 (size=45047)
2024-11-20T22:26:39,480 INFO [M:0;6365a1e51efd:35073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=815.94 KB at sequenceid=2372 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b18a91e811324ed48c1cf31f092b3462
2024-11-20T22:26:39,482 INFO [M:0;6365a1e51efd:35073 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b18a91e811324ed48c1cf31f092b3462
2024-11-20T22:26:39,500 DEBUG [M:0;6365a1e51efd:35073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1718d2b78a5743b3a306bd2dc3524b4b is 69, key is 6365a1e51efd,46811,1732141422048/rs:state/1732141424259/Put/seqid=0
2024-11-20T22:26:39,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073742559_1735 (size=5156)
2024-11-20T22:26:39,513 INFO [M:0;6365a1e51efd:35073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2372 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1718d2b78a5743b3a306bd2dc3524b4b
2024-11-20T22:26:39,516 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc66a90d0a747898484bdc5b7b86c40 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dc66a90d0a747898484bdc5b7b86c40
2024-11-20T22:26:39,520 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dc66a90d0a747898484bdc5b7b86c40, entries=8, sequenceid=2372, filesize=5.5 K
2024-11-20T22:26:39,520 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b18a91e811324ed48c1cf31f092b3462 as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b18a91e811324ed48c1cf31f092b3462
2024-11-20T22:26:39,530 INFO [M:0;6365a1e51efd:35073 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b18a91e811324ed48c1cf31f092b3462
2024-11-20T22:26:39,530 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b18a91e811324ed48c1cf31f092b3462, entries=179, sequenceid=2372, filesize=44.0 K
2024-11-20T22:26:39,532 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1718d2b78a5743b3a306bd2dc3524b4b as hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1718d2b78a5743b3a306bd2dc3524b4b
2024-11-20T22:26:39,537 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46027/user/jenkins/test-data/e440af12-839c-f478-f0cf-67d5e61b7c72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1718d2b78a5743b3a306bd2dc3524b4b, entries=1, sequenceid=2372, filesize=5.0 K
2024-11-20T22:26:39,538 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(3040): Finished flush of dataSize ~816.49 KB/836090, heapSize ~1007.23 KB/1031400, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=2372, compaction requested=false
2024-11-20T22:26:39,568 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T22:26:39,568 DEBUG [M:0;6365a1e51efd:35073 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T22:26:39,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39471 is added to blk_1073741830_1006 (size=991509)
2024-11-20T22:26:39,571 INFO [M:0;6365a1e51efd:35073 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-20T22:26:39,571 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T22:26:39,571 INFO [M:0;6365a1e51efd:35073 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35073
2024-11-20T22:26:39,579 DEBUG [M:0;6365a1e51efd:35073 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/6365a1e51efd,35073,1732141420438 already deleted, retry=false
2024-11-20T22:26:39,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T22:26:39,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35073-0x1015ba22db50000, quorum=127.0.0.1:51916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T22:26:39,688 INFO [M:0;6365a1e51efd:35073 {}] regionserver.HRegionServer(1307): Exiting; stopping=6365a1e51efd,35073,1732141420438; zookeeper connection closed.
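Note: the records above show the master's local "master:store" region being flushed and closed during shutdown: each column family's memstore is written to a .tmp HFile, committed into the store directory, and the region close journal is recorded. The same flush mechanics can be triggered explicitly for a user table through the public Admin API. The following is a minimal, hedged sketch only; the table name "demo", family "f", and row/value contents are hypothetical and not taken from this log.

// Hedged sketch: request a memstore flush for a user table via Admin.flush().
// Assumes an HBase 2.x client on the classpath and a reachable cluster
// (for example, one started with HBaseTestingUtility.startMiniCluster()).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("demo"); // hypothetical table name
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Write one row so the memstore has data to flush.
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      // Ask the region server(s) to flush the memstore to HFiles; this produces
      // DefaultStoreFlusher / HStore "Added ..." log lines similar to those above.
      admin.flush(tn);
    }
  }
}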
2024-11-20T22:26:39,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d30b7f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T22:26:39,703 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61692e60{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T22:26:39,703 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T22:26:39,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@608de7d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T22:26:39,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1535b9ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/hadoop.log.dir/,STOPPED}
2024-11-20T22:26:39,709 WARN [BP-1651373866-172.17.0.2-1732141414202 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T22:26:39,709 WARN [BP-1651373866-172.17.0.2-1732141414202 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651373866-172.17.0.2-1732141414202 (Datanode Uuid 38ad54c8-a252-4485-8e9a-2cc07cf514a6) service to localhost/127.0.0.1:46027
2024-11-20T22:26:39,709 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T22:26:39,710 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T22:26:39,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/dfs/data/data1/current/BP-1651373866-172.17.0.2-1732141414202 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T22:26:39,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/cluster_ef62e46f-019e-73fb-a4e7-fe10e0d6f252/dfs/data/data2/current/BP-1651373866-172.17.0.2-1732141414202 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T22:26:39,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T22:26:39,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5786ce49{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T22:26:39,728 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b71dca4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T22:26:39,728 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T22:26:39,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e8672f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T22:26:39,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ff7a6fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/71bbd582-14b3-ca1f-1b28-81e98675eb0c/hadoop.log.dir/,STOPPED}
2024-11-20T22:26:39,755 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-20T22:26:40,006 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
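Note: the final records show the usual teardown order for this kind of test run: the Jetty web apps and the MiniDFS datanode/namenode are stopped, then the MiniZooKeeperCluster, and finally HBaseTestingUtility reports "Minicluster is down". A minimal, hedged sketch of a JUnit test that produces this start/stop sequence follows; it assumes the hbase-server test artifacts and JUnit 4 are on the classpath, and the class, table, and family names are illustrative only.

// Hedged sketch: start and stop an HBase mini cluster around a test, which
// yields the "Shutdown MiniZK cluster" / "Minicluster is down" lines above.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts MiniZooKeeperCluster, a MiniDFS cluster, and one master plus one region server.
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops HBase, HDFS, and ZooKeeper; logs "Minicluster is down" when finished.
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void writesOneRow() throws Exception {
    // "demo" and family "f" are hypothetical names for this sketch.
    Table table = UTIL.createTable(TableName.valueOf("demo"), Bytes.toBytes("f"));
    table.put(new Put(Bytes.toBytes("r1"))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
  }
}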